#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/list.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count could in theory be the same as the number of
 * interrupts in the system, but we run all interrupt handlers with
 * interrupts disabled, so we cannot have nesting interrupts. Though
 * there are a few palaeontologic drivers which reenable interrupts in
 * the handler, so we need more than one bit here.
 *
 *         PREEMPT_MASK:	0x000000ff
 *         SOFTIRQ_MASK:	0x0000ff00
 *         HARDIRQ_MASK:	0x000f0000
 *             NMI_MASK:	0x00100000
 *       PREEMPT_ACTIVE:	0x00200000
 * PREEMPT_NEED_RESCHED:	0x80000000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	4
#define NMI_BITS	1

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS)     << NMI_SHIFT)

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET	(1UL << NMI_SHIFT)

#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)

#define PREEMPT_ACTIVE_BITS	1
#define PREEMPT_ACTIVE_SHIFT	(NMI_SHIFT + NMI_BITS)
#define PREEMPT_ACTIVE	(__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)

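/*
 * Worked example (follows directly from the masks above): a
 * preempt_count of 0x00010102 decodes as
 *
 *	(0x00010102 & HARDIRQ_MASK) >> HARDIRQ_SHIFT == 1   hardirq nesting
 *	(0x00010102 & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT == 1   softirq count
 *	(0x00010102 & PREEMPT_MASK) >> PREEMPT_SHIFT == 2   preempt_disable() depth
 *
 * i.e. a hardirq that interrupted a softirq, with preemption disabled
 * twice underneath.
 */
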
/* We use the MSB mostly because it's available */
#define PREEMPT_NEED_RESCHED	0x80000000
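
/*
 * Note: an architecture may keep this bit inverted in its stored count
 * (x86 does), so that preempt_enable()'s decrement can test "count hit
 * zero and a reschedule is wanted" in a single operation.
 */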

/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>

#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
				 | NMI_MASK))

/*
 * Are we doing bottom half or hardware interrupt processing?
 *
 * in_irq()             - we're in (hard) IRQ context
 * in_softirq()         - we're processing a softirq or have bottom
 *                        halves disabled
 * in_interrupt()       - we're in any interrupt context (hardirq,
 *                        softirq or NMI)
 * in_serving_softirq() - we're actually executing a softirq handler
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())
#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
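
/*
 * Illustration of the in_softirq()/in_serving_softirq() distinction:
 * local_bh_disable() adds SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET),
 * while actually running a softirq (kernel/softirq.c) accounts a single
 * SOFTIRQ_OFFSET.  Hence:
 *
 *	local_bh_disable();
 *	in_softirq();		// true: softirq_count() is non-zero
 *	in_serving_softirq();	// false: the SOFTIRQ_OFFSET bit is clear
 *	local_bh_enable();
 */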

/*
 * Are we in NMI context?
 */
#define in_nmi()	(preempt_count() & NMI_MASK)

#if defined(CONFIG_PREEMPT_COUNT)
# define PREEMPT_DISABLE_OFFSET 1
#else
# define PREEMPT_DISABLE_OFFSET 0
#endif

/*
 * The preempt_count offset needed for things like:
 *
 *  spin_lock_bh()
 *
 * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
 * softirqs, such that unlock sequences of:
 *
 *  spin_unlock();
 *  local_bh_enable();
 *
 * Work as expected.
 */
#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET)
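
/*
 * With CONFIG_PREEMPT_COUNT this evaluates to 0x201: one bh-disable step
 * (0x200) plus one preempt-disable step (1).  Without it, the preempt
 * half vanishes and only the 0x200 remains.
 */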

/*
 * Are we running in atomic context?  WARNING: this macro cannot
 * always detect atomic context; in particular, it cannot know about
 * held spinlocks in non-preemptible kernels.  Thus it should not be
 * used in the general case to determine whether sleeping is possible.
 * Do not use in_atomic() in driver code.
 */
#define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != 0)

/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler)
 */
#define in_atomic_preempt_off() \
		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_DISABLE_OFFSET)

#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
#else
#define preempt_count_add(val)	__preempt_count_add(val)
#define preempt_count_sub(val)	__preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)

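/*
 * preempt_active_enter/exit() bracket a preemption-triggered call into
 * the scheduler: PREEMPT_ACTIVE tells schedule() that the task is being
 * preempted and must stay on the runqueue even if it is not in
 * TASK_RUNNING state, while the extra PREEMPT_DISABLE_OFFSET keeps
 * preemption disabled across the window.
 */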
#define preempt_active_enter() \
do { \
	preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
	barrier(); \
} while (0)

#define preempt_active_exit() \
do { \
	barrier(); \
	preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
} while (0)

#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#define preemptible()	(preempt_count() == 0 && !irqs_disabled())

#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)

#define preempt_check_resched() \
do { \
	if (should_resched()) \
		__preempt_schedule(); \
} while (0)

#else
#define preempt_enable() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)
#define preempt_check_resched() do { } while (0)
#endif
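
/*
 * Typical usage sketch (illustrative only; "my_percpu_counter" is a
 * made-up per-CPU variable, not defined here):
 *
 *	preempt_disable();
 *	__this_cpu_inc(my_percpu_counter);	// no migration can occur here
 *	preempt_enable();			// may reschedule if needed
 */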

#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#ifdef CONFIG_PREEMPT

#ifndef CONFIG_CONTEXT_TRACKING
#define __preempt_schedule_context() __preempt_schedule()
#endif

#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_context(); \
} while (0)
#else
#define preempt_enable_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)
#endif

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that we don't have things like get_user/put_user
 * that can cause faults and scheduling migrate into our preempt-protected
 * region.
 */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()
#define preempt_check_resched()			do { } while (0)

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()
#define preemptible()				0

#endif /* CONFIG_PREEMPT_COUNT */

#ifdef MODULE
/*
 * Modules have no business playing preemption tricks.
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif

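/*
 * Fold the TIF_NEED_RESCHED flag into preempt_count's
 * PREEMPT_NEED_RESCHED bit, so that a single test in preempt_enable()
 * covers both "count reached zero" and "reschedule pending".
 */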
#define preempt_set_need_resched() \
do { \
	set_preempt_need_resched(); \
} while (0)
#define preempt_fold_need_resched() \
do { \
	if (tif_need_resched()) \
		set_preempt_need_resched(); \
} while (0)

#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and out are called under different
 * contexts.  sched_out is called with rq lock held and irq disabled
 * while sched_in is called without rq lock and irq enabled.  This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}
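
/*
 * Usage sketch (illustrative; "struct my_ctx" and its callbacks are
 * hypothetical, in the spirit of the container_of() note above;
 * my_sched_in would be defined analogously):
 *
 *	struct my_ctx {
 *		struct preempt_notifier pn;
 *		...
 *	};
 *
 *	static void my_sched_out(struct preempt_notifier *pn,
 *				 struct task_struct *next)
 *	{
 *		struct my_ctx *ctx = container_of(pn, struct my_ctx, pn);
 *		// stash per-task state before "next" takes the CPU
 *	}
 *
 *	static struct preempt_ops my_ops = {
 *		.sched_in	= my_sched_in,
 *		.sched_out	= my_sched_out,
 *	};
 *
 *	preempt_notifier_init(&ctx->pn, &my_ops);
 *	preempt_notifier_register(&ctx->pn);
 */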

#endif

#endif /* __LINUX_PREEMPT_H */