/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
 * to disable branch tracing on a per file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

/* Untraced variants for code that must not recurse into the tracer. */
#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

/* Record one annotated-branch outcome and yield the (normalized) value. */
#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__aligned(4)				\
				__section(_ftrace_annotated_branch)	\
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )

#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))

/* Count miss/hit for every (non-constant) if() condition. */
#define __trace_if_value(cond) ({			\
	static struct ftrace_branch_data		\
		__aligned(4)				\
		__section(_ftrace_branch)		\
		__if_trace = {				\
			.func = __func__,		\
			.file = __FILE__,		\
			.line = __LINE__,		\
		};					\
	(cond) ?					\
		(__if_trace.miss_hit[1]++,1) :		\
		(__if_trace.miss_hit[0]++,0);		\
})

#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

/* Prevent the compiler from caching *ptr across the barrier. */
#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define annotate_reachable() ({						\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.reachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define annotate_unreachable() ({					\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.unreachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define ASM_UNREACHABLE							\
	"999:\n\t"							\
	".pushsection .discard.unreachable\n\t"				\
	".long 999b - .\n\t"						\
	".popsection\n\t"

/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(.rodata..c_jump_table)

#ifdef CONFIG_DEBUG_ENTRY
/* Begin/end of an instrumentation safe region */
#define instrumentation_begin() ({					\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.instr_begin\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})

/*
 * Because instrumentation_{begin,end}() can nest, objtool validation considers
 * _begin() a +1 and _end() a -1 and computes a sum over the instructions.
 * When the value is greater than 0, we consider instrumentation allowed.
 *
 * There is a problem with code like:
 *
 * noinstr void foo()
 * {
 *	instrumentation_begin();
 *	...
 *	if (cond) {
 *		instrumentation_begin();
 *		...
 *		instrumentation_end();
 *	}
 *	bar();
 *	instrumentation_end();
 * }
 *
 * If instrumentation_end() would be an empty label, like all the other
 * annotations, the inner _end(), which is at the end of a conditional block,
 * would land on the instruction after the block.
 *
 * If we then consider the sum of the !cond path, we'll see that the call to
 * bar() is with a 0-value, even though, we meant it to happen with a positive
 * value.
 *
 * To avoid this, have _end() be a NOP instruction, this ensures it will be
 * part of the condition block and does not escape.
 */
#define instrumentation_end() ({					\
	asm volatile("%c0: nop\n\t"					\
		     ".pushsection .discard.instr_end\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#endif /* CONFIG_DEBUG_ENTRY */

#else
#define annotate_reachable()
#define annotate_unreachable()
#define __annotate_jump_table
#endif

/* Fallback no-ops when objtool instrumentation tracking is not in use. */
#ifndef instrumentation_begin
#define instrumentation_begin()		do { } while(0)
#define instrumentation_end()		do { } while(0)
#endif


#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
/*
 * Tell the compiler (and objtool, via annotate_unreachable()) that this
 * point in the code cannot be reached.
 */
#ifndef unreachable
# define unreachable() do {		\
	annotate_unreachable();		\
	__builtin_unreachable();	\
} while (0)
#endif


/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__section("___kentry" "+" #sym )			\
	= (unsigned long)&sym;
#endif


/*
 * Hide the real address of 'ptr' from the optimizer so that the
 * offset arithmetic cannot be folded or reassociated by the compiler.
 */
#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif

#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var)						\
	__asm__ ("" : "=r" (var) : "0" (var))
#endif


/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif


/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
#include <asm/barrier.h>
#include <linux/kasan-checks.h>

/*
 * Use __READ_ONCE() instead of READ_ONCE() if you do not require any
 * atomicity or dependency ordering guarantees. Note that this may result
 * in tears!
 */
#define __READ_ONCE(x)	(*(const volatile __unqual_scalar_typeof(x) *)&(x))

/* Volatile read plus a dependency barrier; result keeps x's declared type. */
#define __READ_ONCE_SCALAR(x)						\
({									\
	__unqual_scalar_typeof(x) __x = __READ_ONCE(x);			\
	smp_read_barrier_depends();					\
	(typeof(x))__x;							\
})

/* Checked read: rejects sizes beyond long long via the compiletime assert. */
#define READ_ONCE(x)							\
({									\
	compiletime_assert_rwonce_type(x);				\
	__READ_ONCE_SCALAR(x);						\
})

#define __WRITE_ONCE(x, val)				\
do {							\
	*(volatile typeof(x) *)&(x) = (val);		\
} while (0)

/* Checked write: same size restriction as READ_ONCE(). */
#define WRITE_ONCE(x, val)				\
do {							\
	compiletime_assert_rwonce_type(x);		\
	__WRITE_ONCE(x, val);				\
} while (0)


285
#ifdef CONFIG_KASAN
286
/*
287 288 289 290
 * We can't declare function 'inline' because __no_sanitize_address conflicts
 * with inlining. Attempt to inline it may cause a build failure.
 *     https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
291
 */
292 293 294 295 296 297 298 299 300 301
# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif

static __no_kasan_or_inline
unsigned long __read_once_word_nocheck(const void *addr)
{
	return __READ_ONCE(*(unsigned long *)addr);
}
302 303

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a
 * word from memory atomically but without telling KASAN. This is usually
 * used by unwinding code when walking the stack of a running process.
 */
#define READ_ONCE_NOCHECK(x)						\
({									\
	unsigned long __x;						\
	compiletime_assert(sizeof(x) == sizeof(__x),			\
		"Unsupported access size for READ_ONCE_NOCHECK().");	\
	__x = __read_once_word_nocheck(&(x));				\
	smp_read_barrier_depends();					\
	(typeof(x))__x;							\
})


318 319 320 321 322 323 324
static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
	kasan_check_read(addr, 1);
	return *(unsigned long *)addr;
}

#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym) \
	static void * __section(.discard.addressable) __used \
		__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off:	the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}


#endif /* __ASSEMBLY__ */

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
#endif

/*
 * Only works when the optimizer is on: the call to the undefined-attribute
 * function is eliminated when 'condition' is true, otherwise it triggers
 * the __compiletime_error() diagnostic at build time.
 */
#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (!(condition))					\
			prefix ## suffix();				\
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")


/*
 * Yes, this permits 64-bit accesses on 32-bit architectures. These will
 * actually be atomic in some cases (namely Armv7 + LPAE), but for others we
 * rely on the access being split into 2x32-bit accesses for a 32-bit quantity
 * (e.g. a virtual address) and a strong prevailing wind.
 */
#define compiletime_assert_rwonce_type(t)					\
	compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long),	\
		"Unsupported access size for {READ,WRITE}_ONCE().")


/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))

/*
 * This is needed in functions which generate the stack canary, see
 * arch/x86/kernel/smpboot.c::start_secondary() for an example.
 */
#define prevent_tail_call_optimization()	mb()


#endif /* __LINUX_COMPILER_H */