spinlock.h 10.3 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
I
Ingo Molnar 已提交
5 6 7 8 9 10
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
11
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
I
Ingo Molnar 已提交
12 13 14 15 16
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
17
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
I
Ingo Molnar 已提交
18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_type_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
37
 *                        contains the arch_spin_*()/etc. version of UP
I
Ingo Molnar 已提交
38 39 40 41 42 43 44 45 46
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
L
Linus Torvalds 已提交
47 48
 */

49
#include <linux/typecheck.h>
L
Linus Torvalds 已提交
50 51 52
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
D
David Howells 已提交
53
#include <linux/irqflags.h>
L
Linus Torvalds 已提交
54 55 56
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
A
Andrew Morton 已提交
57
#include <linux/bottom_half.h>
L
Linus Torvalds 已提交
58 59 60 61 62 63

#include <asm/system.h>

/*
 * Must define these before including other files, inline functions need them
 */
64
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
L
Linus Torvalds 已提交
65 66 67 68 69 70 71 72 73 74 75

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

76
#define __lockfunc __attribute__((section(".spinlock.text")))
L
Linus Torvalds 已提交
77 78

/*
79
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
L
Linus Torvalds 已提交
80
 */
I
Ingo Molnar 已提交
81
#include <linux/spinlock_types.h>
L
Linus Torvalds 已提交
82 83

/*
84
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesnt need them):
L
Linus Torvalds 已提交
85
 */
86
#ifdef CONFIG_SMP
I
Ingo Molnar 已提交
87
# include <asm/spinlock.h>
L
Linus Torvalds 已提交
88
#else
I
Ingo Molnar 已提交
89
# include <linux/spinlock_up.h>
L
Linus Torvalds 已提交
90 91
#endif

92
#ifdef CONFIG_DEBUG_SPINLOCK
93 94 95
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key);
# define raw_spin_lock_init(lock)				\
96 97 98
do {								\
	static struct lock_class_key __key;			\
								\
99
	__raw_spin_lock_init((lock), #lock, &__key);		\
100 101 102
} while (0)

#else
103 104
# define raw_spin_lock_init(lock)				\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
105 106
#endif

107
#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)
I
Ingo Molnar 已提交
108

N
Nick Piggin 已提交
109
#ifdef CONFIG_GENERIC_LOCKBREAK
110
#define raw_spin_is_contended(lock) ((lock)->break_lock)
N
Nick Piggin 已提交
111
#else
112

113
#ifdef arch_spin_is_contended
114
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
115
#else
116
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
117
#endif /*arch_spin_is_contended*/
N
Nick Piggin 已提交
118 119
#endif

120 121 122 123 124
/* The lock does not imply full memory barrier. */
#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
static inline void smp_mb__after_lock(void) { smp_mb(); }
#endif

I
Ingo Molnar 已提交
125
/**
126
 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
I
Ingo Molnar 已提交
127 128
 * @lock: the spinlock in question.
 */
129
#define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)
I
Ingo Molnar 已提交
130 131

#ifdef CONFIG_DEBUG_SPINLOCK
132
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
133 134
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
135
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
I
Ingo Molnar 已提交
136
#else
137
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
138
{
139
	__acquire(lock);
140 141 142 143
	arch_spin_lock(&lock->raw_lock);
}

static inline void
144
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
145
{
146
	__acquire(lock);
147 148 149
	arch_spin_lock_flags(&lock->raw_lock, *flags);
}

150
static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
151 152 153 154
{
	return arch_spin_trylock(&(lock)->raw_lock);
}

155
static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
156 157
{
	arch_spin_unlock(&lock->raw_lock);
158
	__release(lock);
159
}
I
Ingo Molnar 已提交
160
#endif
L
Linus Torvalds 已提交
161 162

/*
163 164 165 166
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
 * various methods are defined as nops in the case they are not
 * required.
L
Linus Torvalds 已提交
167
 */
168
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))
L
Linus Torvalds 已提交
169

170
#define raw_spin_lock(lock)	_raw_spin_lock(lock)
171 172

#ifdef CONFIG_DEBUG_LOCK_ALLOC
173 174 175
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)

176
# define raw_spin_lock_nest_lock(lock, nest_lock)			\
P
Peter Zijlstra 已提交
177 178
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
179
		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
P
Peter Zijlstra 已提交
180
	 } while (0)
181
#else
182 183
# define raw_spin_lock_nested(lock, subclass)		_raw_spin_lock(lock)
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
184 185
#endif

I
Ingo Molnar 已提交
186
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
187

188
#define raw_spin_lock_irqsave(lock, flags)			\
189 190
	do {						\
		typecheck(unsigned long, flags);	\
191
		flags = _raw_spin_lock_irqsave(lock);	\
192
	} while (0)
193 194

#ifdef CONFIG_DEBUG_LOCK_ALLOC
195
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
196 197
	do {								\
		typecheck(unsigned long, flags);			\
198
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
199
	} while (0)
200
#else
201
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
202 203
	do {								\
		typecheck(unsigned long, flags);			\
204
		flags = _raw_spin_lock_irqsave(lock);			\
205
	} while (0)
206 207
#endif

L
Linus Torvalds 已提交
208
#else
209

210
#define raw_spin_lock_irqsave(lock, flags)		\
211 212
	do {						\
		typecheck(unsigned long, flags);	\
213
		_raw_spin_lock_irqsave(lock, flags);	\
214
	} while (0)
215

216 217
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)
218

L
Linus Torvalds 已提交
219 220
#endif

221 222 223 224
#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)
L
Linus Torvalds 已提交
225

226 227 228
#define raw_spin_unlock_irqrestore(lock, flags)		\
	do {							\
		typecheck(unsigned long, flags);		\
229
		_raw_spin_unlock_irqrestore(lock, flags);	\
230
	} while (0)
231
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)
L
Linus Torvalds 已提交
232

233 234
#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))
L
Linus Torvalds 已提交
235

236
#define raw_spin_trylock_irq(lock) \
L
Linus Torvalds 已提交
237 238
({ \
	local_irq_disable(); \
239
	raw_spin_trylock(lock) ? \
I
Ingo Molnar 已提交
240
	1 : ({ local_irq_enable(); 0;  }); \
L
Linus Torvalds 已提交
241 242
})

243
#define raw_spin_trylock_irqsave(lock, flags) \
L
Linus Torvalds 已提交
244 245
({ \
	local_irq_save(flags); \
246
	raw_spin_trylock(lock) ? \
I
Ingo Molnar 已提交
247
	1 : ({ local_irq_restore(flags); 0; }); \
L
Linus Torvalds 已提交
248 249
})

250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382
/**
 * raw_spin_can_lock - would raw_spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#define spin_lock_init(_lock)				\
do {							\
	spinlock_check(_lock);				\
	raw_spin_lock_init(&(_lock)->rlock);		\
} while (0)

static inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

static inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)

static inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

static inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

static inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

static inline void spin_unlock_wait(spinlock_t *lock)
{
	raw_spin_unlock_wait(&lock->rlock);
}

static inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

static inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

static inline int spin_can_lock(spinlock_t *lock)
{
	return raw_spin_can_lock(&lock->rlock);
}

static inline void assert_spin_locked(spinlock_t *lock)
{
	assert_raw_spin_locked(&lock->rlock);
}

L
Linus Torvalds 已提交
383
/*
I
Ingo Molnar 已提交
384 385
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
L
Linus Torvalds 已提交
386
 */
I
Ingo Molnar 已提交
387 388 389 390 391
#include <asm/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
392 393 394
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
L
Linus Torvalds 已提交
395
 */
I
Ingo Molnar 已提交
396 397
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
398
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
L
Linus Torvalds 已提交
399 400

#endif /* __LINUX_SPINLOCK_H */