#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_type_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>

#include <asm/system.h>

/*
 * Must define these before including other files, inline functions need them
 */
/* Out-of-line lock slowpaths get placed in a per-object ".text.lock" section. */
#define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

/* All lock functions are collected in the ".spinlock.text" section. */
#define __lockfunc __attribute__((section(".spinlock.text")))
/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesnt need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key);
/*
 * Debug builds register the lock with lockdep: the static __key gives each
 * lock-init site its own lock class, and #lock provides the printable name.
 */
# define raw_spin_lock_init(lock)				\
do {								\
	static struct lock_class_key __key;			\
								\
	__raw_spin_lock_init((lock), #lock, &__key);		\
} while (0)

#else
# define raw_spin_lock_init(lock)				\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
/* No arch support: report "not contended"; (void)(lock) avoids unused warnings. */
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif /*arch_spin_is_contended*/
#endif

/* The lock does not imply full memory barrier. */
#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
static inline void smp_mb__after_lock(void) { smp_mb(); }
#endif

/**
 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
 */
#define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)
I
Ingo Molnar 已提交
129 130

#ifdef CONFIG_DEBUG_SPINLOCK
131 132 133 134
 extern void do_raw_spin_lock(raw_spinlock_t *lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock);
I
Ingo Molnar 已提交
135
#else
136
static inline void do_raw_spin_lock(raw_spinlock_t *lock)
137 138 139 140 141
{
	arch_spin_lock(&lock->raw_lock);
}

static inline void
142
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
143 144 145 146
{
	arch_spin_lock_flags(&lock->raw_lock, *flags);
}

147
static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
148 149 150 151
{
	return arch_spin_trylock(&(lock)->raw_lock);
}

152
static inline void do_raw_spin_unlock(raw_spinlock_t *lock)
153 154 155
{
	arch_spin_unlock(&lock->raw_lock);
}
I
Ingo Molnar 已提交
156
#endif
/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
 * various methods are defined as nops in the case they are not
 * required.
 */
#define raw_spin_trylock(lock)		__cond_lock(lock, _spin_trylock(lock))

#define raw_spin_lock(lock)		_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
/* Without lockdep the nesting annotations carry no information. */
# define raw_spin_lock_nested(lock, subclass)		_spin_lock(lock)
# define raw_spin_lock_nest_lock(lock, nest_lock)	_spin_lock(lock)
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

/* SMP/debug: _spin_lock_irqsave() returns the saved flags. */
#define raw_spin_lock_irqsave(lock, flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _spin_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _spin_lock_irqsave(lock);			\
	} while (0)
#endif

#else

/* UP non-debug: _spin_lock_irqsave() takes flags by name, not by value. */
#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)	_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)		\
	do {							\
		typecheck(unsigned long, flags);		\
		_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock)	__cond_lock(lock, _spin_trylock_bh(lock))

/* Returns 1 with IRQs disabled and the lock held, 0 with IRQs re-enabled. */
#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0;  }); \
})

/* Returns 1 with the lock held, 0 after restoring the saved IRQ flags. */
#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})

/**
 * raw_spin_can_lock - would raw_spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

/*
 * spinlock_check - type-check a spinlock_t and hand back its raw lock.
 * Passing anything but a spinlock_t * fails to compile; that check is
 * the reason the macros below route their argument through this helper.
 */
static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

/* Initialize @_lock; spinlock_check() only enforces the argument type. */
#define spin_lock_init(_lock)				\
do {							\
	spinlock_check(_lock);				\
	raw_spin_lock_init(&(_lock)->rlock);		\
} while (0)

/* Acquire @lock. */
static inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

/* Acquire @lock and disable softirqs on this CPU. */
static inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

/* Try to acquire @lock without spinning; nonzero on success. */
static inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}

/* Lockdep-annotated acquire: @subclass distinguishes nesting levels. */
#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

/* Lockdep-annotated acquire: @nest_lock is the already-held outer lock. */
#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)

/* Acquire @lock with local interrupts disabled. */
static inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

/* Acquire @lock, saving the previous IRQ state into @flags. */
#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)

/* As spin_lock_irqsave(), with a lockdep nesting @subclass annotation. */
#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

/* Release @lock. */
static inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

/* Release @lock and re-enable softirq processing on this CPU. */
static inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

/* Release @lock and enable local interrupts. */
static inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

/* Release @lock and restore the IRQ state saved in @flags. */
static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

/* Trylock with softirqs disabled on success; nonzero on success. */
static inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

/* Trylock with IRQs disabled on success; nonzero on success. */
static inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

/* Trylock, saving IRQ state into @flags on success; nonzero on success. */
#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

/* Spin until @lock is observed unlocked (without acquiring it). */
static inline void spin_unlock_wait(spinlock_t *lock)
{
	raw_spin_unlock_wait(&lock->rlock);
}

/* Nonzero if @lock is currently held. */
static inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

/* Nonzero if another CPU is waiting for @lock (0 when unsupported). */
static inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

/* Nonzero if spin_trylock() would currently succeed. */
static inline int spin_can_lock(spinlock_t *lock)
{
	return raw_spin_can_lock(&lock->rlock);
}

/* Debug assertion that @lock is held; a no-op on non-debug builds. */
static inline void assert_spin_locked(spinlock_t *lock)
{
	assert_raw_spin_locked(&lock->rlock);
}

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <asm/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

#endif /* __LINUX_SPINLOCK_H */