#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/list.h>
/*
 * We use the MSB mostly because it's available; see <linux/preempt_mask.h> for
 * the other bits -- can't include that header due to inclusion hell.
 */
#define PREEMPT_NEED_RESCHED	0x80000000

#include <asm/preempt.h>

/*
 * Debug/tracer configurations use out-of-line preempt_count_add()/sub()
 * (defined elsewhere); all other builds map them straight onto the arch
 * inline primitives from <asm/preempt.h>.
 */
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
/* Decrement the count and report whether a reschedule is now due. */
#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
#else
#define preempt_count_add(val)	__preempt_count_add(val)
#define preempt_count_sub(val)	__preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

/*
 * Raw inc/dec: always the arch inline primitives, bypassing any
 * out-of-line debug/tracer versions selected above.
 */
#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

/* Inc/dec via preempt_count_add()/sub(), which may be out-of-line. */
#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)

#ifdef CONFIG_PREEMPT_COUNT

/*
 * Disable preemption: bump the count, then barrier() so the compiler
 * cannot move the protected region above the increment.
 */
#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

/*
 * Drop the preempt count without checking for a pending reschedule.
 * The barrier keeps the protected region from leaking past the decrement.
 */
#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

/* Alias of sched_preempt_enable_no_resched(). */
#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#ifdef CONFIG_PREEMPT
/*
 * Re-enable preemption and, if the count hit zero with a reschedule
 * pending, call into the scheduler.
 */
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)

/* Reschedule now if one is due, without touching the preempt count. */
#define preempt_check_resched() \
do { \
	if (should_resched()) \
		__preempt_schedule(); \
} while (0)

#else
/* !CONFIG_PREEMPT: enabling never triggers a reschedule. */
#define preempt_enable() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)
/* !CONFIG_PREEMPT: nothing to check. */
#define preempt_check_resched() do { } while (0)
#endif

/*
 * As preempt_disable(), but via the raw arch ops so the (possibly
 * instrumented) preempt_count_add() path is bypassed.
 */
#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

/* Notrace variant of preempt_enable_no_resched(): raw decrement only. */
#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#ifdef CONFIG_PREEMPT

/* Without context tracking the context-aware entry point is the plain one. */
#ifndef CONFIG_CONTEXT_TRACKING
#define __preempt_schedule_context() __preempt_schedule()
#endif

/*
 * Notrace enable: raw dec-and-test, then the context-tracking-aware
 * reschedule entry point when the count reaches zero with work pending.
 */
#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_context(); \
} while (0)
#else
/* !CONFIG_PREEMPT: notrace enable is a bare decrement. */
#define preempt_enable_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)
#endif
#else /* !CONFIG_PREEMPT_COUNT */
/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that we don't have things like get_user/put_user
 * that can cause faults and scheduling migrate into our preempt-protected
 * region.
 */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()
#define preempt_check_resched()			do { } while (0)

/* The notrace variants likewise collapse to plain compiler barriers. */
#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()

#endif /* CONFIG_PREEMPT_COUNT */
#ifdef MODULE
/*
 * Modules have no business playing preemption tricks.
 * Undefining these turns any module-side use into a build error.
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif

#ifdef CONFIG_PREEMPT
/* Unconditionally call set_preempt_need_resched(). */
#define preempt_set_need_resched() \
do { \
	set_preempt_need_resched(); \
} while (0)
/* Call set_preempt_need_resched() only when tif_need_resched() is set. */
#define preempt_fold_need_resched() \
do { \
	if (tif_need_resched()) \
		set_preempt_need_resched(); \
} while (0)
#else
/* Without CONFIG_PREEMPT both are no-ops. */
#define preempt_set_need_resched() do { } while (0)
#define preempt_fold_need_resched() do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and out are called under different
 * contexts.  sched_out is called with rq lock held and irq disabled
 * while sched_in is called without rq lock and irq enabled.  This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;	/* stored by pointer, not copied; see preempt_notifier_init() */
};

/* Add/remove a notifier; definitions live outside this header. */
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

/**
 * preempt_notifier_init - prepare a notifier for later registration
 * @notifier: the notifier to initialize
 * @ops: callback table to attach (stored by pointer, not copied)
 */
static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	notifier->ops = ops;
	INIT_HLIST_NODE(&notifier->link);
}

#endif

#endif /* __LINUX_PREEMPT_H */