#ifndef __LINUX_SPINLOCK_API_SMP_H
#define __LINUX_SPINLOCK_API_SMP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

/*
 * include/linux/spinlock_api_smp.h
 *
 * spinlock API declarations on SMP (and debug)
 * (implemented in kernel/spinlock.c)
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */

int in_lock_functions(unsigned long addr);

#define assert_raw_spin_locked(x)	BUG_ON(!raw_spin_is_locked(x))

void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)		__acquires(lock);
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
								__acquires(lock);
void __lockfunc
_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
								__acquires(lock);
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)		__acquires(lock);
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
								__acquires(lock);

unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
								__acquires(lock);
unsigned long __lockfunc
_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
								__acquires(lock);
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)		__releases(lock);
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc
_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
								__releases(lock);
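
/*
 * These out-of-line entry points are normally reached through the
 * spin_lock_*() wrappers in <linux/spinlock.h>.  A typical (purely
 * illustrative) caller, assuming a structure with a spinlock_t
 * member called 'lock':
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&dev->lock, flags);
 *	... critical section, local interrupts disabled ...
 *	spin_unlock_irqrestore(&dev->lock, flags);
 */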

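/*
 * When the corresponding CONFIG_INLINE_SPIN_* option is set (or
 * CONFIG_UNINLINE_SPIN_UNLOCK is not), the _raw_spin_*() entry points
 * declared above are redirected to the inline __raw_spin_*()
 * implementations below, avoiding an extra function call on the
 * fast path.
 */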
#ifdef CONFIG_INLINE_SPIN_LOCK
#define _raw_spin_lock(lock) __raw_spin_lock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_BH
#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK
#define _raw_spin_trylock(lock) __raw_spin_trylock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
#endif

#ifndef CONFIG_UNINLINE_SPIN_UNLOCK
#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
#endif

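/*
 * Disable preemption and attempt the raw trylock.  On success the
 * acquisition is recorded for lockdep and 1 is returned with the lock
 * held and preemption off; on failure preemption is re-enabled and 0
 * is returned.
 */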
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	preempt_disable();
	if (do_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable();
	return 0;
}

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * On lockdep we dont want the hand-coded irq-enable of
	 * do_raw_spin_lock_flags() code, because lockdep assumes
	 * that interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_LOCKDEP
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
#else
	do_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}

static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */

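/*
 * Unlock fast paths: release the lockdep mapping, perform the raw
 * unlock, then re-enable preemption (and interrupts or bottom halves
 * where applicable).
 */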
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
152
	do_raw_spin_unlock(lock);
153 154 155
	preempt_enable();
}

static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
					    unsigned long flags)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
160
	do_raw_spin_unlock(lock);
161 162 163 164
	local_irq_restore(flags);
	preempt_enable();
}

static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
168
	do_raw_spin_unlock(lock);
169 170 171 172
	local_irq_enable();
	preempt_enable();
}

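/*
 * The _bh unlock re-enables softirqs via local_bh_enable_ip() with the
 * caller's return address so the softirq-enable annotation points at
 * the real call site; preempt_enable_no_resched() suffices here because
 * local_bh_enable*() already performs the reschedule check.
 */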
static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
176
	do_raw_spin_unlock(lock);
177 178 179 180
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}

static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (do_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
	return 0;
}

#include <linux/rwlock_api_smp.h>

#endif /* __LINUX_SPINLOCK_API_SMP_H */