#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/thread_info.h>
#include <linux/linkage.h>
#include <linux/list.h>

/*
 * We use the MSB mostly because it's available; see <linux/preempt_mask.h> for
 * the other bits -- can't include that header due to inclusion hell.
 */
#define PREEMPT_NEED_RESCHED	0x80000000

/*
 * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
 * that think a non-zero value indicates we cannot preempt.
 */
static __always_inline int preempt_count(void)
{
	return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED;
}

static __always_inline int *preempt_count_ptr(void)
{
	return &current_thread_info()->preempt_count;
}
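
/*
 * Illustrative sketch (not part of the original header): because
 * preempt_count() masks out PREEMPT_NEED_RESCHED, callers that test it
 * for zero keep seeing "preemptible" regardless of the folded resched
 * bit.  The helper name below is hypothetical.
 */
static __always_inline bool example_in_atomic_context(void)
{
	/* non-zero only for real preempt_disable()/irq/softirq nesting */
	return preempt_count() != 0;
}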

/*
 * We now lose PREEMPT_NEED_RESCHED and cause an extra reschedule; however, the
 * alternative is losing a reschedule.  Better to schedule too often -- also,
 * this should be a very rare operation.
 */
static __always_inline void preempt_count_set(int pc)
{
	*preempt_count_ptr() = pc;
}

/*
 * We fold the NEED_RESCHED bit into the preempt count such that
 * preempt_enable() can decrement and test for needing to reschedule with a
 * single instruction.
 *
 * We invert the actual bit, so that when the decrement hits 0 we know we both
 * need to resched (the bit is cleared) and can resched (no preempt count).
 */

static __always_inline void set_preempt_need_resched(void)
{
	*preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED;
}

static __always_inline void clear_preempt_need_resched(void)
{
	*preempt_count_ptr() |= PREEMPT_NEED_RESCHED;
}

static __always_inline bool test_preempt_need_resched(void)
{
	return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED);
}
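
/*
 * Illustrative sketch (not part of the original header): with the
 * inverted NEED_RESCHED bit folded into the raw counter, a single
 * decrement-and-test is enough to know that we both may preempt
 * (count reached zero) and must reschedule (bit was cleared).  The
 * helper name below is hypothetical.
 */
static __always_inline bool example_dec_and_should_resched(void)
{
	/* zero iff the preempt count proper is 0 and a resched is needed */
	return !--*preempt_count_ptr();
}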

#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
  extern void add_preempt_count(int val);
  extern void sub_preempt_count(int val);
#else
# define add_preempt_count(val)	do { *preempt_count_ptr() += (val); } while (0)
# define sub_preempt_count(val)	do { *preempt_count_ptr() -= (val); } while (0)
#endif

#define inc_preempt_count() add_preempt_count(1)
#define dec_preempt_count() sub_preempt_count(1)

#ifdef CONFIG_PREEMPT

asmlinkage void preempt_schedule(void);

#define preempt_check_resched() \
do { \
	if (unlikely(!*preempt_count_ptr())) \
		preempt_schedule(); \
} while (0)

#ifdef CONFIG_CONTEXT_TRACKING

void preempt_schedule_context(void);

#define preempt_check_resched_context() \
do { \
	if (unlikely(!*preempt_count_ptr())) \
		preempt_schedule_context(); \
} while (0)
#else

#define preempt_check_resched_context() preempt_check_resched()

#endif /* CONFIG_CONTEXT_TRACKING */

#else /* !CONFIG_PREEMPT */

#define preempt_check_resched()		do { } while (0)
#define preempt_check_resched_context()	do { } while (0)

#endif /* CONFIG_PREEMPT */


#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
	inc_preempt_count(); \
	barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	dec_preempt_count(); \
} while (0)

#define preempt_enable_no_resched()	sched_preempt_enable_no_resched()

#define preempt_enable() \
do { \
	preempt_enable_no_resched(); \
	preempt_check_resched(); \
} while (0)

/* For debugging and tracer internals only! */
#define add_preempt_count_notrace(val)			\
	do { *preempt_count_ptr() += (val); } while (0)
#define sub_preempt_count_notrace(val)			\
	do { *preempt_count_ptr() -= (val); } while (0)
#define inc_preempt_count_notrace() add_preempt_count_notrace(1)
#define dec_preempt_count_notrace() sub_preempt_count_notrace(1)

#define preempt_disable_notrace() \
do { \
	inc_preempt_count_notrace(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	dec_preempt_count_notrace(); \
} while (0)

/* preempt_check_resched is OK to trace */
#define preempt_enable_notrace() \
do { \
	preempt_enable_no_resched_notrace(); \
	preempt_check_resched_context(); \
} while (0)

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that things like get_user/put_user, which can
 * cause faults and scheduling, do not migrate into our preempt-protected
 * region.
 */
#define preempt_disable()		barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()	barrier()
#define preempt_enable()		barrier()

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()

#endif /* CONFIG_PREEMPT_COUNT */
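
/*
 * Illustrative usage sketch (not part of the original header): a typical
 * caller brackets a short per-CPU critical section with
 * preempt_disable()/preempt_enable().  The function and counter argument
 * below are hypothetical.
 */
static inline void example_bump_counter(unsigned int *per_cpu_counter)
{
	preempt_disable();	/* no preemption (or CPU migration) here */
	(*per_cpu_counter)++;
	preempt_enable();	/* may reschedule if a resched is pending */
}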

#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and sched_out are called in different
 * contexts: sched_out is called with the rq lock held and interrupts
 * disabled, while sched_in is called without the rq lock and with
 * interrupts enabled.  This difference is intentional and relied upon
 * by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}
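
/*
 * Illustrative usage sketch (not part of the original header): embed a
 * preempt_notifier in a private structure and recover it in the
 * callbacks with container_of() (from <linux/kernel.h>, assumed to be
 * available to the user).  struct example_vcpu and all example_* names
 * below are hypothetical.
 */
struct example_vcpu {
	struct preempt_notifier pn;
	/* ... private state ... */
};

static void example_sched_in(struct preempt_notifier *notifier, int cpu)
{
	struct example_vcpu *v = container_of(notifier, struct example_vcpu, pn);

	/* about to run again on @cpu; rq lock not held, irqs enabled */
	(void)v;
}

static void example_sched_out(struct preempt_notifier *notifier,
			      struct task_struct *next)
{
	struct example_vcpu *v = container_of(notifier, struct example_vcpu, pn);

	/* just preempted in favour of @next; rq lock held, irqs disabled */
	(void)v;
	(void)next;
}

static struct preempt_ops example_preempt_ops = {
	.sched_in	= example_sched_in,
	.sched_out	= example_sched_out,
};

static inline void example_vcpu_attach(struct example_vcpu *v)
{
	preempt_notifier_init(&v->pn, &example_preempt_ops);
	preempt_notifier_register(&v->pn);	/* registers for the current task */
}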

#endif

#endif /* __LINUX_PREEMPT_H */