/*
 * This file provides wrappers with KASAN instrumentation for atomic operations.
 * To use this functionality, an arch's atomic.h file needs to define all
 * atomic operations with an arch_ prefix (e.g. arch_atomic_read()) and include
 * this file at the end. This file then provides atomic_read(), which forwards
 * to arch_atomic_read() for the actual atomic operation.
 * Note: if an arch atomic operation is implemented by means of other atomic
 * operations (e.g. an atomic_read()/atomic_cmpxchg() loop), it needs to use
 * the arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
 * double instrumentation.
 */
 */

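/*
 * Illustrative sketch only (a hypothetical arch, not part of this header):
 * an arch's atomic.h would look roughly like
 *
 *	static __always_inline int arch_atomic_read(const atomic_t *v)
 *	{
 *		return READ_ONCE(v->counter);
 *	}
 *	...definitions of the remaining arch_ operations...
 *	#include <asm-generic/atomic-instrumented.h>
 *
 * after which callers get the KASAN-checked atomic_read() defined below.
 */
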
#ifndef _LINUX_ATOMIC_INSTRUMENTED_H
#define _LINUX_ATOMIC_INSTRUMENTED_H

#include <linux/build_bug.h>
#include <linux/kasan-checks.h>

static __always_inline int atomic_read(const atomic_t *v)
{
	kasan_check_read(v, sizeof(*v));
	return arch_atomic_read(v);
}

static __always_inline s64 atomic64_read(const atomic64_t *v)
{
	kasan_check_read(v, sizeof(*v));
	return arch_atomic64_read(v);
}

static __always_inline void atomic_set(atomic_t *v, int i)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_set(v, i);
}

static __always_inline void atomic64_set(atomic64_t *v, s64 i)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_set(v, i);
}

static __always_inline int atomic_xchg(atomic_t *v, int i)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_xchg(v, i);
}

static __always_inline s64 atomic64_xchg(atomic64_t *v, s64 i)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_xchg(v, i);
}

static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_cmpxchg(v, old, new);
}

static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_cmpxchg(v, old, new);
}

#ifdef arch_atomic_try_cmpxchg
#define atomic_try_cmpxchg atomic_try_cmpxchg
static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	kasan_check_write(v, sizeof(*v));
	kasan_check_read(old, sizeof(*old));
	return arch_atomic_try_cmpxchg(v, old, new);
}
#endif
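
/*
 * Caller-side sketch (illustrative only, not defined by this header),
 * where compute() stands for any hypothetical update of the old value.
 * atomic_try_cmpxchg() rewrites *old with the value it observed on
 * failure, so the retry loop needs no extra atomic_read():
 *
 *	int old = atomic_read(v);
 *	do {
 *		new = compute(old);
 *	} while (!atomic_try_cmpxchg(v, &old, new));
 */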

#ifdef arch_atomic64_try_cmpxchg
#define atomic64_try_cmpxchg atomic64_try_cmpxchg
static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
	kasan_check_write(v, sizeof(*v));
	kasan_check_read(old, sizeof(*old));
	return arch_atomic64_try_cmpxchg(v, old, new);
}
#endif

static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	kasan_check_write(v, sizeof(*v));
	return __arch_atomic_add_unless(v, a, u);
}

static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_add_unless(v, a, u);
}

static __always_inline void atomic_inc(atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_inc(v);
}

static __always_inline void atomic64_inc(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_inc(v);
}

static __always_inline void atomic_dec(atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_dec(v);
}

static __always_inline void atomic64_dec(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_dec(v);
}

static __always_inline void atomic_add(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_add(i, v);
}

static __always_inline void atomic64_add(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_add(i, v);
}

static __always_inline void atomic_sub(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_sub(i, v);
}

static __always_inline void atomic64_sub(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_sub(i, v);
}

static __always_inline void atomic_and(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_and(i, v);
}

static __always_inline void atomic64_and(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_and(i, v);
}

static __always_inline void atomic_or(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_or(i, v);
}

static __always_inline void atomic64_or(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_or(i, v);
}

static __always_inline void atomic_xor(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_xor(i, v);
}

static __always_inline void atomic64_xor(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic64_xor(i, v);
}

static __always_inline int atomic_inc_return(atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_inc_return(v);
}

static __always_inline s64 atomic64_inc_return(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_inc_return(v);
}

static __always_inline int atomic_dec_return(atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_dec_return(v);
}

static __always_inline s64 atomic64_dec_return(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_dec_return(v);
}

static __always_inline s64 atomic64_inc_not_zero(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_inc_not_zero(v);
}

static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_dec_if_positive(v);
}

static __always_inline bool atomic_dec_and_test(atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_dec_and_test(v);
}

static __always_inline bool atomic64_dec_and_test(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_dec_and_test(v);
}

static __always_inline bool atomic_inc_and_test(atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_inc_and_test(v);
}

static __always_inline bool atomic64_inc_and_test(atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_inc_and_test(v);
}

static __always_inline int atomic_add_return(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_add_return(i, v);
}

static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_add_return(i, v);
}

static __always_inline int atomic_sub_return(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_sub_return(i, v);
}

static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_sub_return(i, v);
}

static __always_inline int atomic_fetch_add(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_fetch_add(i, v);
}

static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_add(i, v);
}

static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_fetch_sub(i, v);
}

static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_sub(i, v);
}

static __always_inline int atomic_fetch_and(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_fetch_and(i, v);
}

static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_and(i, v);
}

static __always_inline int atomic_fetch_or(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_fetch_or(i, v);
}

static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_or(i, v);
}

static __always_inline int atomic_fetch_xor(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_fetch_xor(i, v);
}

static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_fetch_xor(i, v);
}

static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_sub_and_test(i, v);
}

static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_sub_and_test(i, v);
}

static __always_inline bool atomic_add_negative(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_add_negative(i, v);
}

static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic64_add_negative(i, v);
}

static __always_inline unsigned long
cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	kasan_check_write(ptr, size);
	switch (size) {
	case 1:
		return arch_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
	case 2:
		return arch_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
	case 4:
		return arch_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
	case 8:
		BUILD_BUG_ON(sizeof(unsigned long) != 8);
		return arch_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
	}
	BUILD_BUG();
	return 0;
}

#define cmpxchg(ptr, old, new)						\
({									\
	((__typeof__(*(ptr)))cmpxchg_size((ptr), (unsigned long)(old),	\
		(unsigned long)(new), sizeof(*(ptr))));			\
})
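
/*
 * Illustrative use (sketch): given
 *
 *	u16 val = 1;
 *	u16 prev = cmpxchg(&val, 1, 2);
 *
 * the macro above dispatches to cmpxchg_size() with size == 2, so the
 * access is KASAN-checked before arch_cmpxchg() performs the operation.
 */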

static __always_inline unsigned long
sync_cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new,
		  int size)
{
	kasan_check_write(ptr, size);
	switch (size) {
	case 1:
		return arch_sync_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
	case 2:
		return arch_sync_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
	case 4:
		return arch_sync_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
	case 8:
		BUILD_BUG_ON(sizeof(unsigned long) != 8);
		return arch_sync_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
	}
	BUILD_BUG();
	return 0;
}

#define sync_cmpxchg(ptr, old, new)					\
({									\
	((__typeof__(*(ptr)))sync_cmpxchg_size((ptr),			\
		(unsigned long)(old), (unsigned long)(new),		\
		sizeof(*(ptr))));					\
})

static __always_inline unsigned long
cmpxchg_local_size(volatile void *ptr, unsigned long old, unsigned long new,
		   int size)
{
	kasan_check_write(ptr, size);
	switch (size) {
	case 1:
		return arch_cmpxchg_local((u8 *)ptr, (u8)old, (u8)new);
	case 2:
		return arch_cmpxchg_local((u16 *)ptr, (u16)old, (u16)new);
	case 4:
		return arch_cmpxchg_local((u32 *)ptr, (u32)old, (u32)new);
	case 8:
		BUILD_BUG_ON(sizeof(unsigned long) != 8);
		return arch_cmpxchg_local((u64 *)ptr, (u64)old, (u64)new);
	}
	BUILD_BUG();
	return 0;
}

#define cmpxchg_local(ptr, old, new)					\
({									\
	((__typeof__(*(ptr)))cmpxchg_local_size((ptr),			\
		(unsigned long)(old), (unsigned long)(new),		\
		sizeof(*(ptr))));					\
})

static __always_inline u64
cmpxchg64_size(volatile u64 *ptr, u64 old, u64 new)
{
	kasan_check_write(ptr, sizeof(*ptr));
	return arch_cmpxchg64(ptr, old, new);
}

#define cmpxchg64(ptr, old, new)					\
({									\
	((__typeof__(*(ptr)))cmpxchg64_size((ptr), (u64)(old),		\
		(u64)(new)));						\
})

static __always_inline u64
cmpxchg64_local_size(volatile u64 *ptr, u64 old, u64 new)
{
	kasan_check_write(ptr, sizeof(*ptr));
	return arch_cmpxchg64_local(ptr, old, new);
}

#define cmpxchg64_local(ptr, old, new)					\
({									\
	((__typeof__(*(ptr)))cmpxchg64_local_size((ptr), (u64)(old),	\
		(u64)(new)));						\
})

/*
 * Originally we had the following code here:
 *     __typeof__(p1) ____p1 = (p1);
 *     kasan_check_write(____p1, 2 * sizeof(*____p1));
 *     arch_cmpxchg_double(____p1, (p2), (o1), (o2), (n1), (n2));
 * But it leads to compilation failures (see gcc issue 72873).
 * So for now it's left non-instrumented.
 * There are few callers of cmpxchg_double(), so it's not critical.
 */
#define cmpxchg_double(p1, p2, o1, o2, n1, n2)				\
({									\
	arch_cmpxchg_double((p1), (p2), (o1), (o2), (n1), (n2));	\
})

#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2)			\
({									\
	arch_cmpxchg_double_local((p1), (p2), (o1), (o2), (n1), (n2));	\
})

#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */