#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_type_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>

#include <asm/system.h>

/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

/*
 * Emit out-of-line lock slowpath code into a dedicated subsection so the
 * hot path stays compact; the label is created only once per object file.
 */
#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

/* Place all lock functions in their own section for backtrace heuristics. */
#define __lockfunc __attribute__((section(".spinlock.text")))
/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesnt need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key);
/*
 * Debug variant: each init site gets its own static lock_class_key so
 * lockdep can distinguish lock classes by initialization site, and the
 * stringified lock name is recorded for diagnostics.
 */
# define raw_spin_lock_init(lock)				\
do {								\
	static struct lock_class_key __key;			\
								\
	__raw_spin_lock_init((lock), #lock, &__key);		\
} while (0)

#else
/* Non-debug variant: plain structure assignment to the unlocked state. */
# define raw_spin_lock_init(lock)				\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
/* Generic lock-break: contention is tracked in the break_lock field. */
#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
/* No arch support: never report contention; (void) silences unused warnings. */
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif /*arch_spin_is_contended*/
#endif

/* The lock does not imply full memory barrier. */
#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
static inline void smp_mb__after_lock(void) { smp_mb(); }
#endif

/**
 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
 */
#define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)

#ifdef CONFIG_DEBUG_SPINLOCK
131
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
132 133
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
134
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
I
Ingo Molnar 已提交
135
#else
136
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
137
{
138
	__acquire(lock);
139 140 141 142
	arch_spin_lock(&lock->raw_lock);
}

static inline void
143
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
144
{
145
	__acquire(lock);
146 147 148
	arch_spin_lock_flags(&lock->raw_lock, *flags);
}

149
static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
150 151 152 153
{
	return arch_spin_trylock(&(lock)->raw_lock);
}

154
static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
155 156
{
	arch_spin_unlock(&lock->raw_lock);
157
	__release(lock);
158
}
I
Ingo Molnar 已提交
159
#endif
/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
 * various methods are defined as nops in the case they are not
 * required.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)

/* Acquire @lock nested inside @nest_lock; typecheck() makes sure the
 * nest_lock argument really carries a lockdep_map. */
# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
/* Without lockdep, the nesting annotations collapse to a plain lock. */
# define raw_spin_lock_nested(lock, subclass)		_raw_spin_lock(lock)
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

/*
 * On SMP/debug builds the irqsave helpers *return* the saved flags, so
 * typecheck() catches callers passing a pointer instead of the flags
 * lvalue itself.
 */
#define raw_spin_lock_irqsave(lock, flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _raw_spin_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
/* Without lockdep the subclass is ignored. */
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave(lock);			\
	} while (0)
#endif

#else

/* UP non-debug: flags are passed by name into the macro implementation. */
#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_raw_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)		\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

/* Try to take @lock with local interrupts disabled; re-enable on failure. */
#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0;  }); \
})

/* Like raw_spin_trylock_irq() but saves/restores the irq flags. */
#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})

/**
 * raw_spin_can_lock - would raw_spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

/*
 * spinlock_check - return the raw_spinlock_t embedded in @lock.
 * Doubles as a compile-time type check: passing anything other than a
 * spinlock_t * fails to compile, which is why the macros below call it.
 */
static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

/* Initialize @_lock: type-check it, then init the underlying raw lock. */
#define spin_lock_init(_lock)				\
do {							\
	spinlock_check(_lock);				\
	raw_spin_lock_init(&(_lock)->rlock);		\
} while (0)

/* Acquire @lock. */
static inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

/* Acquire @lock, bottom-half (softirq) disabling variant. */
static inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

/* Try to acquire @lock without blocking; non-zero on success. */
static inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}

/* Acquire @lock with an explicit lockdep nesting subclass. */
#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

/* Acquire @lock as nested inside the already-held @nest_lock (lockdep). */
#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)

/* Acquire @lock, local-irq disabling variant. */
static inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

/* Acquire @lock, saving the previous irq state into @flags. */
#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)

/* irqsave acquire with an explicit lockdep nesting subclass. */
#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

/* Release @lock. */
static inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

/* Release @lock, bottom-half re-enabling variant. */
static inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

/* Release @lock, local-irq enabling variant. */
static inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

/* Release @lock and restore the irq state previously saved in @flags. */
static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

/* Trylock variants; each returns non-zero on success. */
static inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

/* Busy-wait until @lock is observed unlocked (does not acquire it). */
static inline void spin_unlock_wait(spinlock_t *lock)
{
	raw_spin_unlock_wait(&lock->rlock);
}

/* Non-zero if @lock is currently held. */
static inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

/* Non-zero if another CPU is waiting for @lock (0 without arch support). */
static inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

/* Would spin_trylock() likely succeed right now? (inherently racy hint) */
static inline int spin_can_lock(spinlock_t *lock)
{
	return raw_spin_can_lock(&lock->rlock);
}

/* Debug assertion that @lock is held at this point. */
static inline void assert_spin_locked(spinlock_t *lock)
{
	assert_raw_spin_locked(&lock->rlock);
}

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <asm/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

#endif /* __LINUX_SPINLOCK_H */