#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/list.h>

/*
 * We use the MSB mostly because its available; see <linux/preempt_mask.h> for
 * the other bits -- can't include that header due to inclusion hell.
 */
#define PREEMPT_NEED_RESCHED	0x80000000

#include <asm/preempt.h>

/*
 * With preempt debugging or the preempt tracer enabled, the count
 * adjustments are real (traced) functions; otherwise they collapse to a
 * direct add/sub on the per-task/per-cpu preempt count.
 */
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
  extern void add_preempt_count(int val);
  extern void sub_preempt_count(int val);
#else
# define add_preempt_count(val)	do { *preempt_count_ptr() += (val); } while (0)
# define sub_preempt_count(val)	do { *preempt_count_ptr() -= (val); } while (0)
#endif

#define inc_preempt_count() add_preempt_count(1)
#define dec_preempt_count() sub_preempt_count(1)

#ifdef CONFIG_PREEMPT

asmlinkage void preempt_schedule(void);

/*
 * Reschedule if the preempt count just dropped to zero; with
 * PREEMPT_NEED_RESCHED folded into the count, !count means both
 * "preemptible" and "resched requested".
 */
#define preempt_check_resched() \
do { \
	if (unlikely(!*preempt_count_ptr())) \
		preempt_schedule(); \
} while (0)

#ifdef CONFIG_CONTEXT_TRACKING

void preempt_schedule_context(void);

/* Like preempt_check_resched(), but context-tracking aware. */
#define preempt_check_resched_context() \
do { \
	if (unlikely(!*preempt_count_ptr())) \
		preempt_schedule_context(); \
} while (0)
#else

#define preempt_check_resched_context() preempt_check_resched()

#endif /* CONFIG_CONTEXT_TRACKING */

#else /* !CONFIG_PREEMPT */

#define preempt_check_resched()		do { } while (0)
#define preempt_check_resched_context()	do { } while (0)

#endif /* CONFIG_PREEMPT */


#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
	inc_preempt_count(); \
	barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	dec_preempt_count(); \
} while (0)

#define preempt_enable_no_resched()	sched_preempt_enable_no_resched()

#define preempt_enable() \
do { \
	preempt_enable_no_resched(); \
	preempt_check_resched(); \
} while (0)

/* For debugging and tracer internals only! */
#define add_preempt_count_notrace(val)			\
	do { *preempt_count_ptr() += (val); } while (0)
#define sub_preempt_count_notrace(val)			\
	do { *preempt_count_ptr() -= (val); } while (0)
#define inc_preempt_count_notrace() add_preempt_count_notrace(1)
#define dec_preempt_count_notrace() sub_preempt_count_notrace(1)

#define preempt_disable_notrace() \
do { \
	inc_preempt_count_notrace(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	dec_preempt_count_notrace(); \
} while (0)

/* preempt_check_resched is OK to trace */
#define preempt_enable_notrace() \
do { \
	preempt_enable_no_resched_notrace(); \
	preempt_check_resched_context(); \
} while (0)

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that we don't have things like get_user/put_user
 * that can cause faults and scheduling migrate into our preempt-protected
 * region.
 */
#define preempt_disable()		barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()	barrier()
#define preempt_enable()		barrier()

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()

#endif /* CONFIG_PREEMPT_COUNT */


#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and out are called under different
 * contexts.  sched_out is called with rq lock held and irq disabled
 * while sched_in is called without rq lock and irq enabled.  This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

/* Initialize @notifier with @ops; register separately to activate it. */
static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}

#endif

#endif /* __LINUX_PREEMPT_H */