/*
 * This file provides wrappers with KASAN instrumentation for atomic operations.
 * To use this functionality, an arch's atomic.h file needs to define all
 * atomic operations with an arch_ prefix (e.g. arch_atomic_read()) and include
 * this file at the end. This file provides atomic_read() that forwards to
 * arch_atomic_read() for the actual atomic operation.
 * Note: if an arch atomic operation is implemented by means of other atomic
 * operations (e.g. an atomic_read()/atomic_cmpxchg() loop), then it needs to
 * use the arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to
 * avoid double instrumentation.
 */
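
/*
 * For illustration, a minimal sketch (hypothetical arch code) of one such
 * operation, defined before including this header last:
 *
 *	static __always_inline int arch_atomic_read(const atomic_t *v)
 *	{
 *		return READ_ONCE(v->counter);
 *	}
 */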

#ifndef _LINUX_ATOMIC_INSTRUMENTED_H
#define _LINUX_ATOMIC_INSTRUMENTED_H

#include <linux/build_bug.h>
#include <linux/kasan-checks.h>

static __always_inline int atomic_read(const atomic_t *v)
{
	kasan_check_read(v, sizeof(*v));
	return arch_atomic_read(v);
}

static __always_inline s64 atomic64_read(const atomic64_t *v)
{
	kasan_check_read(v, sizeof(*v));
	return arch_atomic64_read(v);
}

static __always_inline void atomic_set(atomic_t *v, int i)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_set(v, i);
}

static __always_inline void atomic64_set(atomic64_t *v, s64 i)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_set(v, i);
}

static __always_inline int atomic_xchg(atomic_t *v, int i)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_xchg(v, i);
}

static __always_inline s64 atomic64_xchg(atomic64_t *v, s64 i)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_xchg(v, i);
}

static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_cmpxchg(v, old, new);
}

static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_cmpxchg(v, old, new);
}

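/*
 * Unlike cmpxchg(), try_cmpxchg() writes the value it found back into *old
 * on failure, so @old is read (and updated) as well as @v. A hypothetical
 * retry loop built on top of it, where compute() stands in for any update
 * function:
 *
 *	int old = atomic_read(v);
 *	do {
 *		new = compute(old);
 *	} while (!atomic_try_cmpxchg(v, &old, new));
 */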
#ifdef arch_atomic_try_cmpxchg
#define atomic_try_cmpxchg atomic_try_cmpxchg
static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	kasan_check_write(v, sizeof(*v));
	kasan_check_read(old, sizeof(*old));
	return arch_atomic_try_cmpxchg(v, old, new);
}
#endif

#ifdef arch_atomic64_try_cmpxchg
#define atomic64_try_cmpxchg atomic64_try_cmpxchg
static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
	kasan_check_write(v, sizeof(*v));
	kasan_check_read(old, sizeof(*old));
	return arch_atomic64_try_cmpxchg(v, old, new);
}
#endif

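/*
 * atomic_fetch_add_unless() adds @a to @v unless @v was @u, and returns the
 * old value either way; callers can test "ret != u" to see whether the add
 * happened.
 */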
#ifdef arch_atomic_fetch_add_unless
#define atomic_fetch_add_unless atomic_fetch_add_unless
static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_fetch_add_unless(v, a, u);
}
#endif

static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_add_unless(v, a, u);
}

static __always_inline void atomic_inc(atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_inc(v);
}

static __always_inline void atomic64_inc(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_inc(v);
}

static __always_inline void atomic_dec(atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_dec(v);
}

static __always_inline void atomic64_dec(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_dec(v);
}

static __always_inline void atomic_add(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_add(i, v);
}

static __always_inline void atomic64_add(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_add(i, v);
}

static __always_inline void atomic_sub(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_sub(i, v);
}

static __always_inline void atomic64_sub(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_sub(i, v);
}

static __always_inline void atomic_and(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_and(i, v);
}

static __always_inline void atomic64_and(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_and(i, v);
}

static __always_inline void atomic_or(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_or(i, v);
}

static __always_inline void atomic64_or(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_or(i, v);
}

static __always_inline void atomic_xor(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_xor(i, v);
}

static __always_inline void atomic64_xor(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_xor(i, v);
}

static __always_inline int atomic_inc_return(atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_inc_return(v);
}

static __always_inline s64 atomic64_inc_return(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_inc_return(v);
}

static __always_inline int atomic_dec_return(atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_dec_return(v);
}

static __always_inline s64 atomic64_dec_return(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_dec_return(v);
}

#ifdef arch_atomic64_inc_not_zero
#define atomic64_inc_not_zero atomic64_inc_not_zero
static __always_inline bool atomic64_inc_not_zero(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_inc_not_zero(v);
}
#endif

static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_dec_if_positive(v);
}

static __always_inline bool atomic_dec_and_test(atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_dec_and_test(v);
}

static __always_inline bool atomic64_dec_and_test(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_dec_and_test(v);
}

static __always_inline bool atomic_inc_and_test(atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_inc_and_test(v);
}

static __always_inline bool atomic64_inc_and_test(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_inc_and_test(v);
}

static __always_inline int atomic_add_return(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_add_return(i, v);
}

static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_add_return(i, v);
}

static __always_inline int atomic_sub_return(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_sub_return(i, v);
}

static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_sub_return(i, v);
}

static __always_inline int atomic_fetch_add(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_fetch_add(i, v);
}

static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_add(i, v);
}

static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_fetch_sub(i, v);
}

static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_sub(i, v);
}

static __always_inline int atomic_fetch_and(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_fetch_and(i, v);
}

static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_and(i, v);
}

static __always_inline int atomic_fetch_or(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_fetch_or(i, v);
}

static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_or(i, v);
}

static __always_inline int atomic_fetch_xor(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_fetch_xor(i, v);
}

static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_xor(i, v);
}

static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_sub_and_test(i, v);
}

static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_sub_and_test(i, v);
}

static __always_inline bool atomic_add_negative(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_add_negative(i, v);
}

static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_add_negative(i, v);
}

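/*
 * cmpxchg() and friends accept a pointer to any 1-, 2-, 4- or 8-byte type.
 * A single helper checks the write with KASAN and dispatches on the operand
 * size to the arch implementation; the wrapper macro below casts the result
 * back to the caller's type. The BUILD_BUG_ON() rejects the 8-byte case on
 * 32-bit, where unsigned long cannot hold a u64.
 */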
static __always_inline unsigned long
cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	kasan_check_write(ptr, size);
	switch (size) {
	case 1:
		return arch_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
	case 2:
		return arch_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
	case 4:
		return arch_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
	case 8:
		BUILD_BUG_ON(sizeof(unsigned long) != 8);
		return arch_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
	}
	BUILD_BUG();
	return 0;
}

#define cmpxchg(ptr, old, new)						\
({									\
	((__typeof__(*(ptr)))cmpxchg_size((ptr), (unsigned long)(old),	\
		(unsigned long)(new), sizeof(*(ptr))));			\
})
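
/*
 * For example (hypothetical caller), the result keeps the pointee's type:
 *
 *	u16 prev = cmpxchg(&val16, old16, new16);
 */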

static __always_inline unsigned long
sync_cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new,
		  int size)
{
	kasan_check_write(ptr, size);
	switch (size) {
	case 1:
		return arch_sync_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
	case 2:
		return arch_sync_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
	case 4:
		return arch_sync_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
	case 8:
		BUILD_BUG_ON(sizeof(unsigned long) != 8);
		return arch_sync_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
	}
	BUILD_BUG();
	return 0;
}

#define sync_cmpxchg(ptr, old, new)					\
({									\
	((__typeof__(*(ptr)))sync_cmpxchg_size((ptr),			\
		(unsigned long)(old), (unsigned long)(new),		\
		sizeof(*(ptr))));					\
})

static __always_inline unsigned long
cmpxchg_local_size(volatile void *ptr, unsigned long old, unsigned long new,
		   int size)
{
	kasan_check_write(ptr, size);
	switch (size) {
	case 1:
		return arch_cmpxchg_local((u8 *)ptr, (u8)old, (u8)new);
	case 2:
		return arch_cmpxchg_local((u16 *)ptr, (u16)old, (u16)new);
	case 4:
		return arch_cmpxchg_local((u32 *)ptr, (u32)old, (u32)new);
	case 8:
		BUILD_BUG_ON(sizeof(unsigned long) != 8);
		return arch_cmpxchg_local((u64 *)ptr, (u64)old, (u64)new);
	}
	BUILD_BUG();
	return 0;
}

#define cmpxchg_local(ptr, old, new)					\
({									\
	((__typeof__(*(ptr)))cmpxchg_local_size((ptr),			\
		(unsigned long)(old), (unsigned long)(new),		\
		sizeof(*(ptr))));					\
})

static __always_inline u64
cmpxchg64_size(volatile u64 *ptr, u64 old, u64 new)
{
	kasan_check_write(ptr, sizeof(*ptr));
	return arch_cmpxchg64(ptr, old, new);
}

#define cmpxchg64(ptr, old, new)					\
({									\
	((__typeof__(*(ptr)))cmpxchg64_size((ptr), (u64)(old),		\
		(u64)(new)));						\
})

static __always_inline u64
cmpxchg64_local_size(volatile u64 *ptr, u64 old, u64 new)
{
	kasan_check_write(ptr, sizeof(*ptr));
	return arch_cmpxchg64_local(ptr, old, new);
}

#define cmpxchg64_local(ptr, old, new)					\
({									\
	((__typeof__(*(ptr)))cmpxchg64_local_size((ptr), (u64)(old),	\
		(u64)(new)));						\
})

/*
 * Originally we had the following code here:
 *     __typeof__(p1) ____p1 = (p1);
 *     kasan_check_write(____p1, 2 * sizeof(*____p1));
 *     arch_cmpxchg_double(____p1, (p2), (o1), (o2), (n1), (n2));
 * But it leads to compilation failures (see gcc issue 72873), so for now
 * it's left non-instrumented. There are only a few callers of
 * cmpxchg_double(), so this is not critical.
 */
#define cmpxchg_double(p1, p2, o1, o2, n1, n2)				\
({									\
	arch_cmpxchg_double((p1), (p2), (o1), (o2), (n1), (n2));	\
})

#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2)			\
({									\
	arch_cmpxchg_double_local((p1), (p2), (o1), (o2), (n1), (n2));	\
})

#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */