#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>


/*
 * Must define these before including other files; inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key);
# define raw_spin_lock_init(lock)				\
do {								\
	static struct lock_class_key __key;			\
								\
	__raw_spin_lock_init((lock), #lock, &__key);		\
} while (0)

#else
# define raw_spin_lock_init(lock)				\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif /*arch_spin_is_contended*/
#endif

/*
 * Despite its name it doesn't necessarily have to be a full barrier.
 * It only needs to guarantee that a STORE issued before the critical
 * section cannot be reordered with a LOAD performed inside that
 * section. spin_lock() is a one-way barrier: that LOAD cannot escape
 * out of the region. So the default implementation simply ensures that
 * a STORE cannot move into the critical section; smp_wmb() serializes
 * it with the STORE performed by spin_lock() itself.
 */
#ifndef smp_mb__before_spinlock
#define smp_mb__before_spinlock()	smp_wmb()
#endif
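
/*
 * Illustrative sketch only (hypothetical names, not part of this API):
 * the STORE to cond is issued before the lock is taken, and the above
 * barrier keeps it from being reordered with the LOAD of other inside
 * the critical section:
 *
 *	WRITE_ONCE(cond, 1);
 *	smp_mb__before_spinlock();
 *	raw_spin_lock(&lock);
 *	val = READ_ONCE(other);
 *	raw_spin_unlock(&lock);
 */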

/*
 * Place this after a lock-acquisition primitive to guarantee that
 * an UNLOCK+LOCK pair act as a full barrier.  This guarantee applies
 * if the UNLOCK and LOCK are executed by the same CPU or if the
 * UNLOCK and LOCK operate on the same lock variable.
 */
#ifndef smp_mb__after_unlock_lock
#define smp_mb__after_unlock_lock()	do { } while (0)
#endif
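
/*
 * Illustrative sketch only (hypothetical locks A and B): on this CPU,
 * the unlock of one lock followed by the lock of another is upgraded
 * to a full barrier:
 *
 *	raw_spin_unlock(&A);
 *	raw_spin_lock(&B);
 *	smp_mb__after_unlock_lock();
 *
 * Memory accesses before the unlock are now fully ordered against
 * accesses after the lock, which a plain UNLOCK+LOCK sequence does not
 * guarantee on all architectures.
 */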

/**
 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
 */
#define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
}

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT is set. The
 * various methods are defined as NOPs when they are not required.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)
# define raw_spin_lock_bh_nested(lock, subclass) \
	_raw_spin_lock_bh_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
/*
 * Always evaluate the 'subclass' argument so that the compiler does
 * not warn about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)		\
	_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
# define raw_spin_lock_bh_nested(lock, subclass)	_raw_spin_lock_bh(lock)
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _raw_spin_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave(lock);			\
	} while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_raw_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)		\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})
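
/*
 * Illustrative sketch only (hypothetical lock and flags): the trylock
 * variants return nonzero on success, with interrupts left disabled
 * and the entry state saved in flags:
 *
 *	unsigned long flags;
 *
 *	if (raw_spin_trylock_irqsave(&lock, flags)) {
 *		...critical section, IRQs off...
 *		raw_spin_unlock_irqrestore(&lock, flags);
 *	} else {
 *		...contended: fall back without the lock...
 *	}
 */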

/**
 * raw_spin_can_lock - would raw_spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#define spin_lock_init(_lock)				\
do {							\
	spinlock_check(_lock);				\
	raw_spin_lock_init(&(_lock)->rlock);		\
} while (0)
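
/*
 * Illustrative sketch only (hypothetical structure): dynamically
 * allocated locks must be initialized before first use, so that with
 * CONFIG_DEBUG_SPINLOCK each init site gets its own lock class key:
 *
 *	struct foo {
 *		spinlock_t lock;
 *	};
 *
 *	spin_lock_init(&f->lock);
 *	spin_lock(&f->lock);
 *	...critical section...
 *	spin_unlock(&f->lock);
 */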

static inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

static inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)
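
/*
 * Illustrative sketch only (hypothetical parent/child objects):
 * spin_lock_nested() tells lockdep that taking two locks of the same
 * class is intentional, avoiding a false deadlock report:
 *
 *	spin_lock(&parent->lock);
 *	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	spin_unlock(&child->lock);
 *	spin_unlock(&parent->lock);
 */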

#define spin_lock_bh_nested(lock, subclass)			\
do {								\
	raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)

static inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)
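
/*
 * Illustrative sketch only: flags must be a plain unsigned long in the
 * caller's frame and is passed by name, not by address, because the
 * macro assigns to it (this is what the typecheck() above enforces):
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&f->lock, flags);
 *	...critical section, IRQs off...
 *	spin_unlock_irqrestore(&f->lock, flags);
 */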

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

static inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

static inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

static inline void spin_unlock_wait(spinlock_t *lock)
{
	raw_spin_unlock_wait(&lock->rlock);
}

static inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

static inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

static inline int spin_can_lock(spinlock_t *lock)
{
	return raw_spin_can_lock(&lock->rlock);
}

#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs the definitions above)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
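
/*
 * Illustrative sketch only (hypothetical object and list lock): drop a
 * reference and, only if it was the last one, take the lock to unlink
 * and free the object, leaving no window in which another CPU can find
 * a zero-refcount object on the list:
 *
 *	if (atomic_dec_and_lock(&obj->refcount, &list_lock)) {
 *		list_del(&obj->node);
 *		spin_unlock(&list_lock);
 *		kfree(obj);
 *	}
 */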

#endif /* __LINUX_SPINLOCK_H */