#ifndef __LINUX_SPINLOCK_API_SMP_H
#define __LINUX_SPINLOCK_API_SMP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

/*
 * include/linux/spinlock_api_smp.h
 *
 * spinlock API declarations on SMP (and debug)
 * (implemented in kernel/spinlock.c)
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */
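
/*
 * Illustrative usage (hypothetical "my_lock"/"flags", not part of this
 * header): callers normally use the spin_lock_*() wrappers from
 * <linux/spinlock.h>, which map onto the _spin_*() entry points
 * declared below on SMP builds:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	... critical section, local interrupts disabled ...
 *	spin_unlock_irqrestore(&my_lock, flags);
 */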

int in_lock_functions(unsigned long addr);

#define assert_raw_spin_locked(x)	BUG_ON(!raw_spin_is_locked(x))

void __lockfunc _spin_lock(raw_spinlock_t *lock)	__acquires(lock);
void __lockfunc _spin_lock_nested(raw_spinlock_t *lock, int subclass)
							__acquires(lock);
void __lockfunc
_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
							__acquires(lock);
void __lockfunc _spin_lock_bh(raw_spinlock_t *lock)	__acquires(lock);
void __lockfunc _spin_lock_irq(raw_spinlock_t *lock)	__acquires(lock);

unsigned long __lockfunc _spin_lock_irqsave(raw_spinlock_t *lock)
							__acquires(lock);
unsigned long __lockfunc
_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
							__acquires(lock);
int __lockfunc _spin_trylock(raw_spinlock_t *lock);
int __lockfunc _spin_trylock_bh(raw_spinlock_t *lock);
void __lockfunc _spin_unlock(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc _spin_unlock_bh(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc _spin_unlock_irq(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc
_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
							__releases(lock);
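
/*
 * When an architecture selects the corresponding CONFIG_INLINE_SPIN_*
 * option (see kernel/Kconfig.locks), the out-of-line _spin_*() entry
 * points above are redirected to the __spin_*() inline implementations
 * below, trading code size for the saved function call:
 */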

#ifdef CONFIG_INLINE_SPIN_LOCK
#define _spin_lock(lock) __spin_lock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_BH
#define _spin_lock_bh(lock) __spin_lock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
#define _spin_lock_irq(lock) __spin_lock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK
#define _spin_trylock(lock) __spin_trylock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK
#define _spin_unlock(lock) __spin_unlock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
#endif

static inline int __spin_trylock(raw_spinlock_t *lock)
{
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable();
	return 0;
}
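
/*
 * Typical pattern at the spin_trylock() wrapper level (illustrative,
 * "my_lock" is hypothetical); a non-zero return means the lock is now
 * held:
 *
 *	if (spin_trylock(&my_lock)) {
 *		... critical section ...
 *		spin_unlock(&my_lock);
 *	} else {
 *		... contended: fall back or retry ...
 *	}
 */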

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
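
/*
 * For reference, a rough sketch of the preempt-friendly spin-op built
 * by kernel/spinlock.c when this #if is not taken (illustrative, not
 * the exact BUILD_LOCK_OPS expansion):
 *
 *	for (;;) {
 *		preempt_disable();
 *		if (likely(_raw_spin_trylock(lock)))
 *			break;
 *		preempt_enable();
 *		while (raw_spin_is_locked(lock))
 *			cpu_relax();
 *	}
 *
 * Preemption (and, in the irqsave variant, interrupts) is briefly
 * re-enabled between trylock attempts, which is exactly what lockdep
 * cannot model, hence the non-preemption implementations below.
 */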

static inline unsigned long __spin_lock_irqsave(raw_spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * With lockdep we don't want the hand-coded irq-enable in
	 * _raw_spin_lock_flags(), because lockdep assumes
	 * that interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_LOCKDEP
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
#else
	_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}

static inline void __spin_lock_irq(raw_spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}

static inline void __spin_lock_bh(raw_spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}

static inline void __spin_lock(raw_spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}

#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */

static inline void __spin_unlock(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable();
}

static inline void __spin_unlock_irqrestore(raw_spinlock_t *lock,
					    unsigned long flags)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}

static inline void __spin_unlock_irq(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}

static inline void __spin_unlock_bh(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	/*
	 * local_bh_enable_ip() below does its own preemption check,
	 * so avoid the duplicate check in preempt_enable():
	 */
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}

static inline int __spin_trylock_bh(raw_spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
	return 0;
}

#include <linux/rwlock_api_smp.h>

#endif /* __LINUX_SPINLOCK_API_SMP_H */