#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/preempt.h>
#include <linux/slab.h> /* For kmalloc() */
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>

#include <asm/percpu.h>

/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE		(8 << 10)
#else
#define PERCPU_MODULE_RESERVE		0
#endif

#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM						\
	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +	\
	 PERCPU_MODULE_RESERVE)
#endif

/*
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var) (*({				\
	preempt_disable();				\
	&__get_cpu_var(var); }))

/*
 * The weird & is necessary because sparse considers (void)(var) to be
 * a direct dereference of percpu variable (var).
 */
#define put_cpu_var(var) do {				\
	(void)&(var);					\
	preempt_enable();				\
} while (0)
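/*
 * Usage sketch (illustrative only; "my_count" and bump_my_count() are
 * made-up example names):
 *
 *	DEFINE_PER_CPU(int, my_count);
 *
 *	static void bump_my_count(void)
 *	{
 *		get_cpu_var(my_count)++;
 *		put_cpu_var(my_count);
 *	}
 *
 * Preemption stays disabled between the two calls, so the increment
 * cannot race with this task migrating to another CPU.
 */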

#ifdef CONFIG_SMP

/* minimum unit size; also the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(64 << 10)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk for dynamic percpu allocation if the arch is
 * manually allocating and mapping it for faster access (as part of a
 * large page mapping, for example).
 *
 * The following values give between one and two pages of free space
 * after a typical minimal boot (2-way SMP, single disk and NIC) with
 * both defconfig and a distro config on x86_64 and 32-bit.  A more
 * intelligent way to determine this would be nice.
 */
#if BITS_PER_LONG > 32
#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
#else
#define PERCPU_DYNAMIC_RESERVE		(12 << 10)
#endif

extern void *pcpu_base_addr;
extern const unsigned long *pcpu_unit_offsets;

struct pcpu_group_info {
	int			nr_units;	/* aligned # of units */
	unsigned long		base_offset;	/* base address offset */
	unsigned int		*cpu_map;	/* unit->cpu map, empty
						 * entries contain NR_CPUS */
};

struct pcpu_alloc_info {
	size_t			static_size;
	size_t			reserved_size;
	size_t			dyn_size;
	size_t			unit_size;
	size_t			atom_size;
	size_t			alloc_size;
	size_t			__ai_size;	/* internal, don't use */
	int			nr_groups;	/* 0 if grouping unnecessary */
	struct pcpu_group_info	groups[];
};

enum pcpu_fc {
	PCPU_FC_AUTO,
	PCPU_FC_EMBED,
	PCPU_FC_PAGE,

	PCPU_FC_NR,
};
extern const char *pcpu_fc_names[PCPU_FC_NR];

extern enum pcpu_fc pcpu_chosen_fc;

typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
				     size_t align);
typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);

extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
							     int nr_units);
extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);

extern struct pcpu_alloc_info * __init pcpu_build_alloc_info(
				size_t reserved_size, ssize_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn);

extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
					 void *base_addr);

#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
extern int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn);
#endif

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
extern int __init pcpu_page_first_chunk(size_t reserved_size,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn,
				pcpu_fc_populate_pte_fn_t populate_pte_fn);
#endif

/*
 * Use this to get to a cpu's version of a dynamically allocated
 * per-cpu object.  Non-atomic access to the current CPU's version
 * should probably be combined with get_cpu()/put_cpu().
 */
#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
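/*
 * Usage sketch (illustrative only; "counters" is a made-up example):
 * summing a dynamically allocated percpu counter over all CPUs.
 *
 *	int __percpu *counters = alloc_percpu(int);
 *	unsigned long sum = 0;
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		sum += *per_cpu_ptr(counters, cpu);
 */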

extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
extern void __percpu *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);

#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void __init setup_per_cpu_areas(void);
#endif

#else /* CONFIG_SMP */

#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })

static inline void __percpu *__alloc_percpu(size_t size, size_t align)
{
	/*
	 * Can't easily make larger alignment work with kmalloc.  WARN
	 * on it.  Larger alignment should only be used for module
	 * percpu sections on SMP for which this path isn't used.
	 */
	WARN_ON_ONCE(align > SMP_CACHE_BYTES);
	return kzalloc(size, GFP_KERNEL);
}

static inline void free_percpu(void __percpu *p)
{
	kfree(p);
}

static inline phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
	return __pa(addr);
}

static inline void __init setup_per_cpu_areas(void) { }

static inline void *pcpu_lpage_remapped(void *kaddr)
{
	return NULL;
}

#endif /* CONFIG_SMP */

#define alloc_percpu(type)	\
	(typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))

/*
 * Optional methods for optimized non-lvalue per-cpu variable access.
 *
 * @var can be a percpu variable or a field of it and its size should
 * equal char, int or long.  percpu_read() evaluates to an lvalue and
 * all others to void.
 *
 * These operations are guaranteed to be atomic w.r.t. preemption.
 * The generic versions use plain get/put_cpu_var().  Archs are
 * encouraged to implement single-instruction alternatives which don't
 * require preemption protection.
 */
#ifndef percpu_read
# define percpu_read(var)						\
  ({									\
	typeof(var) *pr_ptr__ = &(var);					\
	typeof(var) pr_ret__;						\
	pr_ret__ = get_cpu_var(*pr_ptr__);				\
	put_cpu_var(*pr_ptr__);						\
	pr_ret__;							\
  })
#endif

#define __percpu_generic_to_op(var, val, op)				\
do {									\
	typeof(var) *pgto_ptr__ = &(var);				\
	get_cpu_var(*pgto_ptr__) op val;				\
	put_cpu_var(*pgto_ptr__);					\
} while (0)

#ifndef percpu_write
# define percpu_write(var, val)		__percpu_generic_to_op(var, (val), =)
#endif

#ifndef percpu_add
# define percpu_add(var, val)		__percpu_generic_to_op(var, (val), +=)
#endif

#ifndef percpu_sub
# define percpu_sub(var, val)		__percpu_generic_to_op(var, (val), -=)
#endif

#ifndef percpu_and
# define percpu_and(var, val)		__percpu_generic_to_op(var, (val), &=)
#endif

#ifndef percpu_or
# define percpu_or(var, val)		__percpu_generic_to_op(var, (val), |=)
#endif

#ifndef percpu_xor
# define percpu_xor(var, val)		__percpu_generic_to_op(var, (val), ^=)
#endif
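/*
 * Usage sketch for the optional accessors above (illustrative only;
 * "nr_events" is a made-up example variable):
 *
 *	DEFINE_PER_CPU(unsigned long, nr_events);
 *
 *	percpu_add(nr_events, 1);
 *	if (percpu_read(nr_events) > 1024)
 *		percpu_write(nr_events, 0);
 *
 * Each individual access is atomic w.r.t. preemption; archs such as
 * x86 can turn each one into a single instruction.
 */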

/*
 * Branching macros that split an operation into a set of functions
 * which are called depending on the scalar size of the object handled.
 */

extern void __bad_size_call_parameter(void);

#define __pcpu_size_call_return(stem, variable)				\
({	typeof(variable) pscr_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: pscr_ret__ = stem##1(variable);break;			\
	case 2: pscr_ret__ = stem##2(variable);break;			\
	case 4: pscr_ret__ = stem##4(variable);break;			\
	case 8: pscr_ret__ = stem##8(variable);break;			\
	default:							\
		__bad_size_call_parameter();break;			\
	}								\
	pscr_ret__;							\
})

#define __pcpu_size_call(stem, variable, ...)				\
do {									\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
		case 1: stem##1(variable, __VA_ARGS__);break;		\
		case 2: stem##2(variable, __VA_ARGS__);break;		\
		case 4: stem##4(variable, __VA_ARGS__);break;		\
		case 8: stem##8(variable, __VA_ARGS__);break;		\
		default: 						\
			__bad_size_call_parameter();break;		\
	}								\
} while (0)
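/*
 * For example (illustrative expansion), for a 4-byte percpu variable x,
 * this_cpu_add(x, 1) dispatches via __pcpu_size_call() roughly as:
 *
 *	switch (sizeof(x)) {
 *	...
 *	case 4: this_cpu_add_4(x, 1); break;
 *	...
 *	}
 *
 * so an arch only needs to provide this_cpu_add_4() (or override
 * this_cpu_add() entirely) to optimize that size; the remaining sizes
 * fall back to the generic implementations below.
 */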

/*
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables.
 *
 * These operations guarantee exclusivity of access w.r.t. other operations
 * on the *same* processor. The assumption is that per cpu data is only
 * accessed by a single processor instance (the current one).
 *
 * The first group is used for accesses that must be done in a
 * preemption safe way since we know that the context is not preempt
 * safe. Interrupts may occur. If the interrupt modifies the variable
 * too then RMW actions will not be reliable.
 *
 * The arch code can provide optimized functions in two ways:
 *
 * 1. Override the function completely. F.e. define this_cpu_add().
 *    The arch must then ensure that the various scalar format passed
 *    are handled correctly.
 *
 * 2. Provide functions for certain scalar sizes. F.e. provide
 *    this_cpu_add_2() to provide per cpu atomic operations for 2 byte
 *    sized RMW actions. If arch code does not provide operations for
 *    a scalar size then the fallback in the generic code will be
 *    used.
 */
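/*
 * Usage sketch (illustrative only; "nr_packets" is a made-up example
 * variable): a preemption safe increment from process context.
 *
 *	DEFINE_PER_CPU(unsigned long, nr_packets);
 *
 *	void count_packet(void)
 *	{
 *		this_cpu_inc(nr_packets);
 *	}
 *
 * The RMW is atomic w.r.t. preemption only; if an interrupt handler
 * also modifies nr_packets, use the irqsafe_cpu_*() variants below.
 */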

#define _this_cpu_generic_read(pcp)					\
({	typeof(pcp) ret__;						\
	preempt_disable();						\
	ret__ = *this_cpu_ptr(&(pcp));					\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_read
# ifndef this_cpu_read_1
#  define this_cpu_read_1(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_2
#  define this_cpu_read_2(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_4
#  define this_cpu_read_4(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_8
#  define this_cpu_read_8(pcp)	_this_cpu_generic_read(pcp)
# endif
# define this_cpu_read(pcp)	__pcpu_size_call_return(this_cpu_read_, (pcp))
#endif

#define _this_cpu_generic_to_op(pcp, val, op)				\
do {									\
	preempt_disable();						\
	*__this_cpu_ptr(&(pcp)) op val;					\
	preempt_enable();						\
} while (0)

#ifndef this_cpu_write
# ifndef this_cpu_write_1
#  define this_cpu_write_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_2
#  define this_cpu_write_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_4
#  define this_cpu_write_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_8
#  define this_cpu_write_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, (pcp), (val))
#endif

#ifndef this_cpu_add
# ifndef this_cpu_add_1
#  define this_cpu_add_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_2
#  define this_cpu_add_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_4
#  define this_cpu_add_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_8
#  define this_cpu_add_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, (pcp), (val))
#endif

#ifndef this_cpu_sub
# define this_cpu_sub(pcp, val)		this_cpu_add((pcp), -(val))
#endif

#ifndef this_cpu_inc
# define this_cpu_inc(pcp)		this_cpu_add((pcp), 1)
#endif

#ifndef this_cpu_dec
# define this_cpu_dec(pcp)		this_cpu_sub((pcp), 1)
#endif

#ifndef this_cpu_and
# ifndef this_cpu_and_1
#  define this_cpu_and_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_2
#  define this_cpu_and_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_4
#  define this_cpu_and_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_8
#  define this_cpu_and_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, (pcp), (val))
#endif

#ifndef this_cpu_or
# ifndef this_cpu_or_1
#  define this_cpu_or_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_2
#  define this_cpu_or_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_4
#  define this_cpu_or_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_8
#  define this_cpu_or_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, (pcp), (val))
#endif

#ifndef this_cpu_xor
# ifndef this_cpu_xor_1
#  define this_cpu_xor_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_2
#  define this_cpu_xor_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_4
#  define this_cpu_xor_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_8
#  define this_cpu_xor_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define this_cpu_xor(pcp, val)		__pcpu_size_call(this_cpu_xor_, (pcp), (val))
#endif

/*
 * Generic percpu operations that do not require preemption handling.
 * Either we do not care about races or the caller has the
 * responsibility of handling preemption issues. Arch code can still
 * override these instructions since the arch per cpu code may be more
 * efficient and may actually get race freeness for free (that is the
 * case for x86 for example).
 *
 * If there is no other protection through preempt disable and/or
 * disabling interrupts then one of these RMW operations can show unexpected
 * behavior because the execution thread was rescheduled on another processor
 * or an interrupt occurred and the same percpu variable was modified from
 * the interrupt context.
 */
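/*
 * Usage sketch (illustrative only; "nr_packets" is the made-up example
 * variable from above): __this_cpu_*() is appropriate when the caller
 * already prevents preemption.
 *
 *	preempt_disable();
 *	__this_cpu_inc(nr_packets);
 *	preempt_enable();
 */
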
#ifndef __this_cpu_read
# ifndef __this_cpu_read_1
#  define __this_cpu_read_1(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_2
#  define __this_cpu_read_2(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_4
#  define __this_cpu_read_4(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_8
#  define __this_cpu_read_8(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# define __this_cpu_read(pcp)	__pcpu_size_call_return(__this_cpu_read_, (pcp))
#endif

#define __this_cpu_generic_to_op(pcp, val, op)				\
do {									\
	*__this_cpu_ptr(&(pcp)) op val;					\
} while (0)

#ifndef __this_cpu_write
# ifndef __this_cpu_write_1
#  define __this_cpu_write_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_2
#  define __this_cpu_write_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_4
#  define __this_cpu_write_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_8
#  define __this_cpu_write_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# define __this_cpu_write(pcp, val)	__pcpu_size_call(__this_cpu_write_, (pcp), (val))
#endif

#ifndef __this_cpu_add
# ifndef __this_cpu_add_1
#  define __this_cpu_add_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_2
#  define __this_cpu_add_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_4
#  define __this_cpu_add_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_8
#  define __this_cpu_add_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define __this_cpu_add(pcp, val)	__pcpu_size_call(__this_cpu_add_, (pcp), (val))
#endif

#ifndef __this_cpu_sub
# define __this_cpu_sub(pcp, val)	__this_cpu_add((pcp), -(val))
#endif

#ifndef __this_cpu_inc
# define __this_cpu_inc(pcp)		__this_cpu_add((pcp), 1)
#endif

#ifndef __this_cpu_dec
# define __this_cpu_dec(pcp)		__this_cpu_sub((pcp), 1)
#endif

#ifndef __this_cpu_and
# ifndef __this_cpu_and_1
#  define __this_cpu_and_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_2
#  define __this_cpu_and_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_4
#  define __this_cpu_and_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_8
#  define __this_cpu_and_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define __this_cpu_and(pcp, val)	__pcpu_size_call(__this_cpu_and_, (pcp), (val))
#endif

#ifndef __this_cpu_or
# ifndef __this_cpu_or_1
#  define __this_cpu_or_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_2
#  define __this_cpu_or_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_4
#  define __this_cpu_or_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_8
#  define __this_cpu_or_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define __this_cpu_or(pcp, val)	__pcpu_size_call(__this_cpu_or_, (pcp), (val))
#endif

#ifndef __this_cpu_xor
# ifndef __this_cpu_xor_1
#  define __this_cpu_xor_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_2
#  define __this_cpu_xor_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_4
#  define __this_cpu_xor_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_8
#  define __this_cpu_xor_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define __this_cpu_xor(pcp, val)	__pcpu_size_call(__this_cpu_xor_, (pcp), (val))
#endif

/*
 * IRQ safe versions of the per cpu RMW operations. Note that these operations
 * are *not* safe against modification of the same variable from another
 * processor (which is what one gets when using regular atomic operations).
 * They are guaranteed to be atomic vs. local interrupts and
 * preemption only.
 */
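/*
 * Usage sketch (illustrative only; "rx_errors" is a made-up example
 * variable that is also incremented from an interrupt handler):
 *
 *	irqsafe_cpu_inc(rx_errors);
 *
 * The generic fallback brackets the RMW with local_irq_save()/
 * local_irq_restore(), making it atomic vs. local interrupts and
 * preemption on this CPU.
 */
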
#define irqsafe_cpu_generic_to_op(pcp, val, op)				\
do {									\
	unsigned long flags;						\
	local_irq_save(flags);						\
	*__this_cpu_ptr(&(pcp)) op val;					\
	local_irq_restore(flags);					\
} while (0)

#ifndef irqsafe_cpu_add
# ifndef irqsafe_cpu_add_1
#  define irqsafe_cpu_add_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_2
#  define irqsafe_cpu_add_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_4
#  define irqsafe_cpu_add_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_8
#  define irqsafe_cpu_add_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# define irqsafe_cpu_add(pcp, val) __pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_sub
# define irqsafe_cpu_sub(pcp, val)	irqsafe_cpu_add((pcp), -(val))
#endif

#ifndef irqsafe_cpu_inc
# define irqsafe_cpu_inc(pcp)	irqsafe_cpu_add((pcp), 1)
#endif

#ifndef irqsafe_cpu_dec
# define irqsafe_cpu_dec(pcp)	irqsafe_cpu_sub((pcp), 1)
#endif

#ifndef irqsafe_cpu_and
# ifndef irqsafe_cpu_and_1
#  define irqsafe_cpu_and_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_2
#  define irqsafe_cpu_and_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_4
#  define irqsafe_cpu_and_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_8
#  define irqsafe_cpu_and_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# define irqsafe_cpu_and(pcp, val) __pcpu_size_call(irqsafe_cpu_and_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_or
# ifndef irqsafe_cpu_or_1
#  define irqsafe_cpu_or_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_2
#  define irqsafe_cpu_or_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_4
#  define irqsafe_cpu_or_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_8
#  define irqsafe_cpu_or_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# define irqsafe_cpu_or(pcp, val) __pcpu_size_call(irqsafe_cpu_or_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_xor
# ifndef irqsafe_cpu_xor_1
#  define irqsafe_cpu_xor_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_2
#  define irqsafe_cpu_xor_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_4
#  define irqsafe_cpu_xor_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_8
#  define irqsafe_cpu_xor_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
#endif

#endif /* __LINUX_PERCPU_H */