/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__aligned(4)				\
				__section(_ftrace_annotated_branch)	\
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )

#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))

#define __trace_if_value(cond) ({			\
	static struct ftrace_branch_data		\
		__aligned(4)				\
		__section(_ftrace_branch)		\
		__if_trace = {				\
			.func = __func__,		\
			.file = __FILE__,		\
			.line = __LINE__,		\
		};					\
	(cond) ?					\
		(__if_trace.miss_hit[1]++,1) :		\
		(__if_trace.miss_hit[0]++,0);		\
})

#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif
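
/*
 * Illustrative use (a sketch, not part of this header): annotate the
 * expected direction of a branch so the compiler can lay the fast path
 * out as the fall-through:
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *	if (unlikely(!page))
 *		return -ENOMEM;
 */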

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif
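
/*
 * Example (a sketch of how lib/string.c's memzero_explicit() uses this):
 * barrier_data() keeps a memset() of a dying stack buffer from being
 * elided as a dead store, because the barrier is told the data is live:
 *
 *	memset(buf, 0, size);
 *	barrier_data(buf);
 */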

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define annotate_reachable() ({						\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.reachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define annotate_unreachable() ({					\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.unreachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define ASM_UNREACHABLE							\
	"999:\n\t"							\
	".pushsection .discard.unreachable\n\t"				\
	".long 999b - .\n\t"						\
	".popsection\n\t"

/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(.rodata..c_jump_table)

#else
#define annotate_reachable()
#define annotate_unreachable()
#define __annotate_jump_table
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do {		\
	annotate_unreachable();		\
	__builtin_unreachable();	\
} while (0)
#endif
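
/*
 * Illustrative sketch (x86-flavoured, hypothetical): arch BUG()
 * implementations end with a trapping instruction that the compiler
 * cannot see is noreturn, so they follow it with unreachable():
 *
 *	asm volatile("ud2");
 *	unreachable();
 */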

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__section("___kentry" "+" #sym )			\
	= (unsigned long)&sym;
#endif
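
/*
 * Illustrative use (hypothetical symbol name): keep an otherwise
 * unreferenced entry point alive across section garbage collection:
 *
 *	void my_vector_entry(void);
 *	KENTRY(my_vector_entry);
 */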

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif
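
/*
 * Illustrative sketch (simplified from the per-CPU accessors): adding a
 * per-CPU offset to a symbol's address would otherwise let GCC assume
 * the result still points into the original object:
 *
 *	ptr = RELOC_HIDE(&per_cpu_var, per_cpu_offset(cpu));
 */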

#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var)						\
	__asm__ ("" : "=r" (var) : "0" (var))
#endif
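
/*
 * Illustrative sketch (in the style of crypto_memneq()): hiding the
 * accumulator stops the compiler from short-circuiting a constant-time
 * comparison:
 *
 *	neq |= a ^ b;
 *	OPTIMIZER_HIDE_VAR(neq);
 */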

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
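
/*
 * e.g. __UNIQUE_ID(foo) on line 42 pastes to __UNIQUE_ID_foo42 -- unique
 * within a file unless two expansions land on the same line (hence
 * "not-quite-unique"; compiler-specific headers use __COUNTER__ instead).
 */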

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
#include <asm/barrier.h>
#include <linux/kasan-checks.h>

/*
 * Use __READ_ONCE() instead of READ_ONCE() if you do not require any
 * atomicity or dependency ordering guarantees. Note that this may result
 * in tears!
 */
#define __READ_ONCE(x)	(*(const volatile __unqual_scalar_typeof(x) *)&(x))

#define __READ_ONCE_SCALAR(x)						\
({									\
	__unqual_scalar_typeof(x) __x = __READ_ONCE(x);			\
	smp_read_barrier_depends();					\
	(typeof(x))__x;							\
})

#define READ_ONCE(x)							\
({									\
	compiletime_assert_rwonce_type(x);				\
	__READ_ONCE_SCALAR(x);						\
})

#define __WRITE_ONCE(x, val)				\
do {							\
	*(volatile typeof(x) *)&(x) = (val);		\
} while (0)

#define WRITE_ONCE(x, val)				\
do {							\
	compiletime_assert_rwonce_type(x);		\
	__WRITE_ONCE(x, val);				\
} while (0)
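
/*
 * Illustrative sketch: a lockless completion flag shared between an
 * interrupt handler and task context. Marking both accesses keeps the
 * compiler from tearing, fusing or refetching them:
 *
 *	writer (IRQ):			reader (task):
 *	WRITE_ONCE(done, 1);		while (!READ_ONCE(done))
 *						cpu_relax();
 */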

#ifdef CONFIG_KASAN
/*
 * We can't declare the function 'inline' because __no_sanitize_address
 * conflicts with inlining. Attempting to inline it may cause a build failure.
 *     https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif

static __no_kasan_or_inline
unsigned long __read_once_word_nocheck(const void *addr)
{
	return __READ_ONCE(*(unsigned long *)addr);
}

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a
 * word from memory atomically but without telling KASAN. This is usually
 * used by unwinding code when walking the stack of a running process.
 */
#define READ_ONCE_NOCHECK(x)						\
({									\
	unsigned long __x = __read_once_word_nocheck(&(x));		\
	smp_read_barrier_depends();					\
	__x;								\
})
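
/*
 * Illustrative sketch (hypothetical frame layout): an unwinder probing a
 * possibly-stale stack slot of a running task:
 *
 *	addr = READ_ONCE_NOCHECK(frame->ret_addr);
 */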

static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
	kasan_check_read(addr, 1);
	return *(unsigned long *)addr;
}

#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym) \
	static void * __section(.discard.addressable) __used \
		__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off:	the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}
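
/*
 * Illustrative sketch (hypothetical table): entries emitted as 32-bit
 * place-relative offsets, e.g. via asm(".long sym - ."), are decoded
 * back into absolute pointers:
 *
 *	void (*fn)(void) = offset_to_ptr(&table[i]);
 */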

#endif /* __ASSEMBLY__ */

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (!(condition))					\
			prefix ## suffix();				\
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
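
/*
 * Example: compiletime_assert(sizeof(long) == 8, "needs 64-bit long");
 * expands to nothing when the condition holds; otherwise it leaves a
 * call to a function declared __compiletime_error(), failing the build
 * with the supplied message (where the compiler supports the attribute).
 */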

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")

/*
 * Yes, this permits 64-bit accesses on 32-bit architectures. These will
 * actually be atomic in some cases (namely Armv7 + LPAE), but for others we
 * rely on the access being split into 2x32-bit accesses for a 32-bit quantity
 * (e.g. a virtual address) and a strong prevailing wind.
 */
#define compiletime_assert_rwonce_type(t)					\
	compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long),	\
		"Unsupported access size for {READ,WRITE}_ONCE().")

/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
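
/*
 * e.g. ARRAY_SIZE() in <linux/kernel.h> is built on this check:
 *
 *	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
 */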

#endif /* __LINUX_COMPILER_H */