/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

/*
 * Update the current task's runtime statistics. Skip if the current
 * task is not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	u64 delta_exec;

	if (!task_has_rt_policy(curr))
		return;

	delta_exec = rq->clock - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	curr->se.exec_start = rq->clock;
}

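/*
 * Queue the task at the tail of the list for its priority and mark
 * that priority as populated in the bitmap.
 */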
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct rt_prio_array *array = &rq->rt.active;

	list_add_tail(&p->run_list, array->queue + p->prio);
	__set_bit(p->prio, array->bitmap);
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
	struct rt_prio_array *array = &rq->rt.active;

	update_curr_rt(rq);

	list_del(&p->run_list);
	if (list_empty(array->queue + p->prio))
		__clear_bit(p->prio, array->bitmap);
}

/*
 * Put the task at the end of the run list without the overhead of a
 * dequeue followed by an enqueue.
 */
static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
	struct rt_prio_array *array = &rq->rt.active;

	list_move_tail(&p->run_list, array->queue + p->prio);
}

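/*
 * sched_yield() for RT tasks: requeue the current task to the tail of
 * its priority queue so other tasks of the same priority get a turn.
 */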
static void
yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
	if (p->prio < rq->curr->prio)
		resched_task(rq->curr);
}

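/*
 * Pick the highest-priority queued RT task (or NULL if none) and start
 * its runtime accounting.
 */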
static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct rt_prio_array *array = &rq->rt.active;
	struct task_struct *next;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	if (idx >= MAX_RT_PRIO)
		return NULL;

	queue = array->queue + idx;
	next = list_entry(queue->next, struct task_struct, run_list);

	next->se.exec_start = rq->clock;

	return next;
}

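/*
 * The task is being switched out: fold its runtime into the statistics
 * and clear its accounting timestamp.
 */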
static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);
	p->se.exec_start = 0;
}

/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static struct task_struct *load_balance_start_rt(void *arg)
{
	struct rq *rq = arg;
	struct rt_prio_array *array = &rq->rt.active;
	struct list_head *head, *curr;
	struct task_struct *p;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	if (idx >= MAX_RT_PRIO)
		return NULL;

	head = array->queue + idx;
	curr = head->prev;

	p = list_entry(curr, struct task_struct, run_list);

	curr = curr->prev;

	rq->rt.rt_load_balance_idx = idx;
	rq->rt.rt_load_balance_head = head;
	rq->rt.rt_load_balance_curr = curr;

	return p;
}

static struct task_struct *load_balance_next_rt(void *arg)
{
	struct rq *rq = arg;
	struct rt_prio_array *array = &rq->rt.active;
	struct list_head *head, *curr;
	struct task_struct *p;
	int idx;

	idx = rq->rt.rt_load_balance_idx;
	head = rq->rt.rt_load_balance_head;
	curr = rq->rt.rt_load_balance_curr;

	/*
	 * If we arrived back to the head again then
	 * iterate to the next queue (if any):
	 */
	if (unlikely(head == curr)) {
		int next_idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);

		if (next_idx >= MAX_RT_PRIO)
			return NULL;

		idx = next_idx;
		head = array->queue + idx;
		curr = head->prev;

		rq->rt.rt_load_balance_idx = idx;
		rq->rt.rt_load_balance_head = head;
	}

	p = list_entry(curr, struct task_struct, run_list);

	curr = curr->prev;

	rq->rt.rt_load_balance_curr = curr;

	return p;
}

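/*
 * Move up to max_load_move worth of weighted load from the busiest
 * runqueue onto this one, walking 'busiest' with the iterators above.
 */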
static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move,
		struct sched_domain *sd, enum cpu_idle_type idle,
		int *all_pinned, int *this_best_prio)
{
	struct rq_iterator rt_rq_iterator;

	rt_rq_iterator.start = load_balance_start_rt;
	rt_rq_iterator.next = load_balance_next_rt;
	/*
	 * Pass the 'busiest' rq argument into the
	 * load_balance_[start|next]_rt iterators.
	 */
	rt_rq_iterator.arg = busiest;

	return balance_tasks(this_rq, this_cpu, busiest, max_load_move, sd,
			     idle, all_pinned, this_best_prio, &rt_rq_iterator);
}

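/*
 * Try to pull a single RT task from 'busiest' onto this runqueue,
 * using the same iterators; returns non-zero if a task was moved.
 */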
static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		 struct sched_domain *sd, enum cpu_idle_type idle)
{
	struct rq_iterator rt_rq_iterator;

	rt_rq_iterator.start = load_balance_start_rt;
	rt_rq_iterator.next = load_balance_next_rt;
	rt_rq_iterator.arg = busiest;

	return iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
				  &rt_rq_iterator);
}

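/*
 * Timer-tick hook: SCHED_FIFO tasks are left alone, SCHED_RR tasks get
 * their timeslice refilled and are requeued once it runs out.
 */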
static void task_tick_rt(struct rq *rq, struct task_struct *p)
{
	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->time_slice)
		return;

	p->time_slice = DEF_TIMESLICE;

	/*
	 * Requeue to the end of the queue if we are not the only
	 * element on the queue:
	 */
	if (p->run_list.prev != p->run_list.next) {
		requeue_task_rt(rq, p);
		set_tsk_need_resched(p);
	}
}

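/*
 * The running task's scheduling parameters have changed: restart its
 * runtime accounting from the current runqueue clock.
 */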
static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq->clock;
}

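/*
 * The hooks the core scheduler uses for SCHED_FIFO/SCHED_RR tasks;
 * lower-priority classes follow via .next (the fair class).
 */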
const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

	.load_balance		= load_balance_rt,
	.move_one_task		= move_one_task_rt,

	.set_curr_task          = set_curr_task_rt,
	.task_tick		= task_tick_rt,
};