/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#include "sched.h"

#include <linux/slab.h>

int sched_rr_timeslice = RR_TIMESLICE;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	ktime_t now;
	int overrun;
	int idle = 0;

	for (;;) {
		now = hrtimer_cb_get_time(timer);
		overrun = hrtimer_forward(timer, now, rt_b->rt_period);

		if (!overrun)
			break;

		idle = do_sched_rt_period_timer(rt_b, overrun);
	}

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer,
			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	if (hrtimer_active(&rt_b->rt_period_timer))
		return;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
{
	struct rt_prio_array *array;
	int i;

	array = &rt_rq->active;
	for (i = 0; i < MAX_RT_PRIO; i++) {
		INIT_LIST_HEAD(array->queue + i);
		__clear_bit(i, array->bitmap);
	}
	/* delimiter for bitsearch: */
	__set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->highest_prio.next = MAX_RT_PRIO;
	rt_rq->rt_nr_migratory = 0;
	rt_rq->overloaded = 0;
	plist_head_init(&rt_rq->pushable_tasks);
#endif

	rt_rq->rt_time = 0;
	rt_rq->rt_throttled = 0;
	rt_rq->rt_runtime = 0;
	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	hrtimer_cancel(&rt_b->rt_period_timer);
}

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

void free_rt_sched_group(struct task_group *tg)
{
	int i;

	if (tg->rt_se)
		destroy_rt_bandwidth(&tg->rt_bandwidth);

	for_each_possible_cpu(i) {
		if (tg->rt_rq)
			kfree(tg->rt_rq[i]);
		if (tg->rt_se)
			kfree(tg->rt_se[i]);
	}

	kfree(tg->rt_rq);
	kfree(tg->rt_se);
}

void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->rt_nr_boosted = 0;
	rt_rq->rq = rq;
	rt_rq->tg = tg;

	tg->rt_rq[cpu] = rt_rq;
	tg->rt_se[cpu] = rt_se;

	if (!rt_se)
		return;

	if (!parent)
		rt_se->rt_rq = &rq->rt;
	else
		rt_se->rt_rq = parent->my_q;

	rt_se->my_q = rt_rq;
	rt_se->parent = parent;
	INIT_LIST_HEAD(&rt_se->run_list);
}

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct rt_rq *rt_rq;
	struct sched_rt_entity *rt_se;
	int i;

	tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->rt_rq)
		goto err;
	tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->rt_se)
		goto err;

	init_rt_bandwidth(&tg->rt_bandwidth,
			ktime_to_ns(def_rt_bandwidth.rt_period), 0);

	for_each_possible_cpu(i) {
		rt_rq = kzalloc_node(sizeof(struct rt_rq),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_rq)
			goto err;

		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_se)
			goto err_free_rq;

		init_rt_rq(rt_rq, cpu_rq(i));
		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
	}

	return 1;

err_free_rq:
	kfree(rt_rq);
err:
	return 0;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);
	struct rq *rq = task_rq(p);

	return &rq->rt;
}

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static int pull_rt_task(struct rq *this_rq);

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	return rq->rt.highest_prio.curr > prev->prio;
}

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 *
	 * Matched by the barrier in pull_rt_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

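/*
 * An rq is "RT overloaded" once it has more than one RT task queued
 * (rt_nr_total > 1) and at least one of them can migrate; the state is
 * advertised through the root domain so peer CPUs know there is
 * something here worth pulling.
 */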
static void update_rt_migration(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;
		}
	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;
	}
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total++;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total--;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory--;

	update_rt_migration(rt_rq);
}

static inline int has_pushable_tasks(struct rq *rq)
{
	return !plist_head_empty(&rq->rt.pushable_tasks);
}

static inline void set_post_schedule(struct rq *rq)
{
	/*
	 * We detect this state here so that we can avoid taking the RQ
	 * lock again later if there is no need to push
	 */
	rq->post_schedule = has_pushable_tasks(rq);
}

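/*
 * rq->rt.pushable_tasks is a plist ordered by task priority; the node is
 * deleted, re-initialised and re-added so the ordering stays correct even
 * if p's priority changed since it was last queued.
 */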
static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the highest prio pushable task */
	if (p->prio < rq->rt.highest_prio.next)
		rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the new highest prio pushable task */
	if (has_pushable_tasks(rq)) {
		p = plist_first_entry(&rq->rt.pushable_tasks,
				      struct task_struct, pushable_tasks);
		rq->rt.highest_prio.next = p->prio;
	} else
		rq->rt.highest_prio.next = MAX_RT_PRIO;
}

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline int pull_rt_task(struct rq *this_rq)
{
	return 0;
}

static inline void set_post_schedule(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

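/*
 * Walk the global task_groups list under RCU, skipping autogroup
 * entries; returns NULL once the iteration wraps back to the list head.
 */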
static inline struct task_group *next_task_group(struct task_group *tg)
{
	do {
		tg = list_entry_rcu(tg->list.next,
			typeof(struct task_group), list);
	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));

	if (&tg->list == &task_groups)
		tg = NULL;

	return tg;
}

#define for_each_rt_rq(rt_rq, iter, rq)					\
	for (iter = container_of(&task_groups, typeof(*iter), list);	\
		(iter = next_task_group(iter)) &&			\
		(rt_rq = iter->rt_rq[cpu_of(rq)]);)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct sched_rt_entity *rt_se;

	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_rq->rt_nr_running) {
		if (rt_se && !on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se, false);
		if (rt_rq->highest_prio.curr < curr->prio)
			resched_task(curr);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se;
	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_se && on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return this_rq()->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_running)
		resched_task(rq_of_rt_rq(rt_rq)->curr);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP
/*
 * We ran out of runtime; see if we can borrow some from our neighbours.
 */
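/*
 * In rough terms: with n CPUs in the root domain, take up to a 1/n share
 * of each neighbour's spare budget (rt_runtime - rt_time), while never
 * letting our own runtime grow beyond one full period.
 */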
static int do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
	int i, weight, more = 0;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			more = 1;
			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		raw_spin_unlock(&iter->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	return more;
}

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
				rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);

		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have, that's the amount of runtime
		 * we lent and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/*
		 * Greedy reclaim, take back as much as we can.
		 */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;

			raw_spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			raw_spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
		BUG_ON(want);
balanced:
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
		rt_rq->rt_runtime = RUNTIME_INF;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void __enable_runtime(struct rq *rq)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/*
	 * Reset each runqueue's bandwidth settings
	 */
	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static int balance_runtime(struct rt_rq *rt_rq)
{
	int more = 0;

	if (!sched_feat(RT_RUNTIME_SHARE))
		return more;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		more = do_balance_runtime(rt_rq);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
	}

	return more;
}
#else /* !CONFIG_SMP */
static inline int balance_runtime(struct rt_rq *rt_rq)
{
	return 0;
}
#endif /* CONFIG_SMP */

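/*
 * Body of the replenishment timer: for every rt_rq in the period mask,
 * decay rt_time by one runtime budget per elapsed period and unthrottle
 * queues that have dropped back below their budget. Returns 1 when
 * everything has gone idle and the timer can stop.
 */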
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1, throttled = 0;
	const struct cpumask *span;

	span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * FIXME: isolated CPUs should really leave the root task group,
	 * whether they are isolcpus or were isolated via cpusets, lest
	 * the timer run on a CPU which does not service all runqueues,
	 * potentially leaving other CPUs indefinitely throttled.  If
	 * isolation is really required, the user will turn the throttle
	 * off to kill the perturbations it causes anyway.  Meanwhile,
	 * this maintains functionality for boot and/or troubleshooting.
	 */
	if (rt_b == &root_task_group.rt_bandwidth)
		span = cpu_online_mask;
#endif
	for_each_cpu(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);

		raw_spin_lock(&rq->lock);
		if (rt_rq->rt_time) {
			u64 runtime;

			raw_spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;

				/*
				 * Force a clock update if the CPU was idle,
				 * lest wakeup -> unthrottle time accumulate.
				 */
				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
					rq->skip_clock_update = -1;
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running) {
			idle = 0;
			if (!rt_rq_throttled(rt_rq))
				enqueue = 1;
		}
		if (rt_rq->rt_throttled)
			throttled = 1;

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		raw_spin_unlock(&rq->lock);
	}

	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
		return 1;

	return idle;
}

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio.curr;
#endif

	return rt_task_of(rt_se)->prio;
}

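/*
 * Decide whether this rt_rq must be throttled, called right after rt_time
 * was charged: try to borrow runtime first, and return 1 only when the
 * queue was actually dequeued so the caller knows to reschedule.
 */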
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (runtime >= sched_rt_period(rt_rq))
		return 0;

	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		/*
		 * Don't actually throttle groups that have no runtime assigned
		 * but accrue some time due to boosting.
		 */
		if (likely(rt_b->rt_runtime)) {
			static bool once = false;

			rt_rq->rt_throttled = 1;

			if (!once) {
				once = true;
				printk_sched("sched: RT throttling activated\n");
			}
		} else {
			/*
			 * In case we did anyway, make it go away,
			 * replenishment is a joke, since it will replenish us
			 * with exactly 0 ns.
			 */
			rt_rq->rt_time = 0;
		}

		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	u64 delta_exec;

	if (curr->sched_class != &rt_sched_class)
		return;

	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	if (!rt_bandwidth_enabled())
		return;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);

		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			raw_spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			if (sched_rt_runtime_exceeded(rt_rq))
				resched_task(curr);
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		}
	}
}

#if defined CONFIG_SMP

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
	if (rq->online && prio < prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
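/*
 * Maintain the cached highest queued RT priority (highest_prio.curr):
 * it bumps cheaply on enqueue and is recomputed from the priority bitmap
 * on dequeue only when the departing entity held the cached value.
 */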
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (prio < prev_prio)
		rt_rq->highest_prio.curr = prio;

	inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (rt_rq->rt_nr_running) {

		WARN_ON(prio < prev_prio);

		/*
		 * This may have been our highest task, and therefore
		 * we may have some recomputation to do
		 */
		if (prio == prev_prio) {
			struct rt_prio_array *array = &rt_rq->active;

			rt_rq->highest_prio.curr =
				sched_find_first_bit(array->bitmap);
		}

	} else
		rt_rq->highest_prio.curr = MAX_RT_PRIO;

	dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	if (rt_rq->tg)
		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	int prio = rt_se_prio(rt_se);

	WARN_ON(!rt_prio(prio));
	rt_rq->rt_nr_running++;

	inc_rt_prio(rt_rq, prio);
	inc_rt_migration(rt_se, rt_rq);
	inc_rt_group(rt_se, rt_rq);
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running--;

	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
	dec_rt_migration(rt_se, rt_rq);
	dec_rt_group(rt_se, rt_rq);
}

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct list_head *queue = array->queue + rt_se_prio(rt_se);

	/*
	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
		return;

	if (head)
		list_add(&rt_se->run_list, queue);
	else
		list_add_tail(&rt_se->run_list, queue);
	__set_bit(rt_se_prio(rt_se), array->bitmap);

	inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	list_del_init(&rt_se->run_list);
	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	dec_rt_tasks(rt_se, rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top-down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
{
	struct sched_rt_entity *back = NULL;

	for_each_sched_rt_entity(rt_se) {
		rt_se->back = back;
		back = rt_se;
	}

	for (rt_se = back; rt_se; rt_se = rt_se->back) {
		if (on_rt_rq(rt_se))
			__dequeue_rt_entity(rt_se);
	}
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
	dequeue_rt_stack(rt_se);
	for_each_sched_rt_entity(rt_se)
		__enqueue_rt_entity(rt_se, head);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	dequeue_rt_stack(rt_se);

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = group_rt_rq(rt_se);

		if (rt_rq && rt_rq->rt_nr_running)
			__enqueue_rt_entity(rt_se, false);
	}
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	if (flags & ENQUEUE_WAKEUP)
		rt_se->timeout = 0;

	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);

	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);

	inc_nr_running(rq);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);
	dequeue_rt_entity(rt_se);

	dequeue_pushable_task(rq, p);

	dec_nr_running(rq);
}

/*
 * Put a task at the head or the end of the run list without the overhead of
 * dequeue followed by enqueue.
 */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
	if (on_rt_rq(rt_se)) {
		struct rt_prio_array *array = &rt_rq->active;
		struct list_head *queue = array->queue + rt_se_prio(rt_se);

		if (head)
			list_move(&rt_se->run_list, queue);
		else
			list_move_tail(&rt_se->run_list, queue);
	}
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		requeue_rt_entity(rt_rq, rt_se, head);
	}
}

static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr, 0);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int
select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	struct task_struct *curr;
	struct rq *rq;

	if (p->nr_cpus_allowed == 1)
		goto out;

	/* For anything but wake ups, just return the task_cpu */
	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
		goto out;

	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = ACCESS_ONCE(rq->curr); /* unlocked access */

	/*
	 * If the current task on @p's runqueue is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues. If the woken
	 * task is a higher priority, then it will stay on this CPU
	 * and the lower prio task should be moved to another CPU.
	 * Even though this will probably make the lower prio task
	 * lose its cache, we do not want to bounce a higher task
	 * around just because it gave up its CPU, perhaps for a
	 * lock?
	 *
	 * For equal prio tasks, we just let the scheduler sort it out.
	 *
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away
	 *
	 * This test is optimistic, if we get it wrong the load-balancer
	 * will have to sort it out.
	 */
	if (curr && unlikely(rt_task(curr)) &&
	    (curr->nr_cpus_allowed < 2 ||
	     curr->prio <= p->prio)) {
		int target = find_lowest_rq(p);

		if (target != -1)
			cpu = target;
	}
	rcu_read_unlock();

out:
	return cpu;
}

static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
	if (rq->curr->nr_cpus_allowed == 1)
		return;

	if (p->nr_cpus_allowed != 1
	    && cpupri_find(&rq->rd->cpupri, p, NULL))
		return;

	if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
		return;

	/*
	 * There appear to be other cpus that can accept
	 * current and none to run 'p', so let's reschedule
	 * to try and push current away:
	 */
	requeue_task_rt(rq, p, 1);
	resched_task(rq->curr);
}

#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
	if (p->prio < rq->curr->prio) {
		resched_task(rq->curr);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * If:
	 *
	 * - the newly woken task is of equal priority to the current task
	 * - the newly woken task is non-migratable while current is migratable
	 * - current will be preempted on the next reschedule
	 *
	 * we should check to see if current can readily move to a different
	 * cpu.  If so, we will reschedule to allow the push logic to try
	 * to move current somewhere else, making room for our non-migratable
	 * task.
	 */
	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
		check_preempt_equal_prio(rq, p);
#endif
}

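/*
 * The next entity to run is the first one on the highest-priority
 * non-empty queue: sched_find_first_bit() returns the lowest set index
 * in the bitmap, and a lower index means a higher priority.
 */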
static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *next = NULL;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	BUG_ON(idx >= MAX_RT_PRIO);

	queue = array->queue + idx;
	next = list_entry(queue->next, struct sched_rt_entity, run_list);

	return next;
}

static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
	struct sched_rt_entity *rt_se;
	struct task_struct *p;
	struct rt_rq *rt_rq  = &rq->rt;

	do {
		rt_se = pick_next_rt_entity(rq, rt_rq);
		BUG_ON(!rt_se);
		rt_rq = group_rt_rq(rt_se);
	} while (rt_rq);

	p = rt_task_of(rt_se);
	p->se.exec_start = rq_clock_task(rq);

	return p;
}

static struct task_struct *
pick_next_task_rt(struct rq *rq, struct task_struct *prev)
{
	struct task_struct *p;
	struct rt_rq *rt_rq = &rq->rt;

	if (need_pull_rt_task(rq, prev))
		pull_rt_task(rq);

	if (!rt_rq->rt_nr_running)
		return NULL;

	if (rt_rq_throttled(rt_rq))
		return NULL;

	put_prev_task(rq, prev);

	p = _pick_next_task_rt(rq);

	/* The running task is never eligible for pushing */
	if (p)
		dequeue_pushable_task(rq, p);

	set_post_schedule(rq);

	return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);

	/*
	 * The previous task needs to be made eligible for pushing
	 * if it is still active
	 */
	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
		return 1;
	return 0;
}

/*
 * Return the highest pushable rq's task, which is suitable to be executed
 * on the cpu, NULL otherwise
 */
static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
{
	struct plist_head *head = &rq->rt.pushable_tasks;
	struct task_struct *p;

	if (!has_pushable_tasks(rq))
		return NULL;

	plist_for_each_entry(p, head, pushable_tasks) {
		if (pick_rt_task(rq, p, cpu))
			return p;
	}

	return NULL;
}

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);

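/*
 * cpupri_find() fills lowest_mask with the CPUs running at the lowest
 * priority class this task can preempt; from that mask we prefer, in
 * order: the task's own CPU, a CPU sharing a SD_WAKE_AFFINE domain
 * (this_cpu first), and finally any compatible CPU.
 */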
static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu      = task_cpu(task);

	/* Make sure the mask is initialized first */
	if (unlikely(!lowest_mask))
		return -1;

	if (task->nr_cpus_allowed == 1)
		return -1; /* No other targets possible */

	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
		return -1; /* No targets found */

	/*
	 * At this point we have built a mask of cpus representing the
	 * lowest priority tasks in the system.  Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last cpu that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpumask_test_cpu(cpu, lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which cpu is logically closest to our hot cache data.
	 */
	if (!cpumask_test_cpu(this_cpu, lowest_mask))
		this_cpu = -1; /* Skip this_cpu opt if not among lowest */

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			int best_cpu;

			/*
			 * "this_cpu" is cheaper to preempt than a
			 * remote processor.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return this_cpu;
			}

			best_cpu = cpumask_first_and(lowest_mask,
						     sched_domain_span(sd));
			if (best_cpu < nr_cpu_ids) {
				rcu_read_unlock();
				return best_cpu;
			}
		}
	}
	rcu_read_unlock();

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible
	 * locations.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any(lowest_mask);
	if (cpu < nr_cpu_ids)
		return cpu;
	return -1;
}

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the meantime, the task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(lowest_rq->cpu,
						       tsk_cpus_allowed(task)) ||
				     task_running(rq, task) ||
				     !task->on_rq)) {

				double_unlock_balance(rq, lowest_rq);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio.curr > task->prio)
			break;

		/* try again */
		double_unlock_balance(rq, lowest_rq);
		lowest_rq = NULL;
	}

	return lowest_rq;
}

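/*
 * Peek at the head of the pushable plist: the highest-priority queued
 * task that is not currently running, i.e. the best push candidate.
 */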
static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_tasks(rq))
		return NULL;

	p = plist_first_entry(&rq->rt.pushable_tasks,
			      struct task_struct, pushable_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(p->nr_cpus_allowed <= 1);

	BUG_ON(!p->on_rq);
	BUG_ON(!rt_task(p));

	return p;
}

/*
 * If the current CPU has more than one RT task, see if the non
 * running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;
	int ret = 0;

	if (!rq->rt.overloaded)
		return 0;

	next_task = pick_next_pushable_task(rq);
	if (!next_task)
		return 0;

retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * It's possible that the next_task slipped in of
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
	if (unlikely(next_task->prio < rq->curr->prio)) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has migrated.
		 *
		 * We need to make sure that the task is still on the same
		 * run-queue and is also still the next task eligible for
		 * pushing.
		 */
		task = pick_next_pushable_task(rq);
		if (task_cpu(next_task) == rq->cpu && task == next_task) {
			/*
			 * The task hasn't migrated, and is still the next
			 * eligible task, but we failed to find a run-queue
			 * to push it to.  Do not retry in this case, since
			 * other cpus will pull from us when ready.
			 */
			goto out;
		}

		if (!task)
			/* No more tasks, just exit */
			goto out;

		/*
		 * Something has shifted, try again.
		 */
		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);
	ret = 1;

	resched_task(lowest_rq->curr);

	double_unlock_balance(rq, lowest_rq);

out:
	put_task_struct(next_task);

	return ret;
}

static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task will return true if it moved an RT */
	while (push_rt_task(rq))
		;
}

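/*
 * The pull side of balancing: scan every CPU flagged in rd->rto_mask and
 * steal its next pushable task if that task beats our own top priority.
 * Returns 1 if at least one task was pulled over.
 */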
static int pull_rt_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p;
	struct rq *src_rq;

	if (likely(!rt_overloaded(this_rq)))
		return 0;

	/*
	 * Match the barrier from rt_set_overload(); this guarantees that if we
	 * see overloaded we must also see the rto_mask bit.
	 */
	smp_rmb();

	for_each_cpu(cpu, this_rq->rd->rto_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * Don't bother taking the src_rq->lock if the next highest
		 * task is known to be lower-priority than our current task.
		 * This may look racy, but if this value is about to go
		 * logically higher, the src_rq will push this task away.
		 * And if it's going logically lower, we do not care
		 */
		if (src_rq->rt.highest_prio.next >=
		    this_rq->rt.highest_prio.curr)
			continue;

		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * alter this_rq
		 */
		double_lock_balance(this_rq, src_rq);

		/*
		 * We can pull only a task, which is pushable
		 * on its rq, and no others.
		 */
		p = pick_highest_pushable_task(src_rq, this_cpu);

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->on_rq);

			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just that p is waking up and hasn't
			 * had a chance to schedule. We only pull
			 * p if it is lower in priority than the
			 * current task on the run queue
			 */
			if (p->prio < src_rq->curr->prio)
				goto skip;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood
			 * but possible)
			 */
		}
skip:
		double_unlock_balance(this_rq, src_rq);
	}

	return ret;
}

static void post_schedule_rt(struct rq *rq)
{
	push_rt_tasks(rq);
}

/*
 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now
 */
static void task_woken_rt(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    has_pushable_tasks(rq) &&
	    p->nr_cpus_allowed > 1 &&
	    (dl_task(rq->curr) || rt_task(rq->curr)) &&
	    (rq->curr->nr_cpus_allowed < 2 ||
	     rq->curr->prio <= p->prio))
		push_rt_tasks(rq);
}

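/*
 * Affinity update: only the transition between "pinned to one CPU" and
 * "free to migrate" matters here, so fix up rt_nr_migratory and the
 * task's presence on the pushable list, then recheck the overload state.
 */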
static void set_cpus_allowed_rt(struct task_struct *p,
				const struct cpumask *new_mask)
{
	struct rq *rq;
	int weight;

	BUG_ON(!rt_task(p));

	if (!p->on_rq)
		return;

	weight = cpumask_weight(new_mask);

	/*
	 * Only update if the process changes its state from whether it
	 * can migrate or not.
	 */
	if ((p->nr_cpus_allowed > 1) == (weight > 1))
		return;

	rq = task_rq(p);

	/*
	 * The process used to be able to migrate OR it can now migrate
	 */
	if (weight <= 1) {
		if (!task_current(rq, p))
			dequeue_pushable_task(rq, p);
		BUG_ON(!rq->rt.rt_nr_migratory);
		rq->rt.rt_nr_migratory--;
	} else {
		if (!task_current(rq, p))
			enqueue_pushable_task(rq, p);
		rq->rt.rt_nr_migratory++;
	}

	update_rt_migration(&rq->rt);
}

/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_set_overload(rq);

	__enable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
}

/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_clear_overload(rq);

	__disable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}

/*
 * When switching from the rt queue, we bring ourselves to a position
 * that we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p)
{
	/*
	 * If there are other RT tasks then we will reschedule
	 * and the scheduling of the other RT tasks will handle
	 * the balancing. But if we are the last RT task
	 * we may need to handle the pulling of RT tasks
	 * now.
	 */
	if (!p->on_rq || rq->rt.rt_nr_running)
		return;

	if (pull_rt_task(rq))
		resched_task(rq->curr);
}

void init_sched_rt_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
					GFP_KERNEL, cpu_to_node(i));
	}
}
#endif /* CONFIG_SMP */

/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p)
{
	int check_resched = 1;

	/*
	 * If we are already running, then there's nothing
	 * that needs to be done. But if we are not running
	 * we may need to preempt the current running task.
	 * If that current running task is also an RT task
	 * then see if we can move to another run queue.
	 */
	if (p->on_rq && rq->curr != p) {
#ifdef CONFIG_SMP
		if (rq->rt.overloaded && push_rt_task(rq) &&
		    /* Don't resched if we changed runqueues */
		    rq != task_rq(p))
			check_resched = 0;
#endif /* CONFIG_SMP */
		if (check_resched && p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}

/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void
prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
{
	if (!p->on_rq)
		return;

	if (rq->curr == p) {
#ifdef CONFIG_SMP
		/*
		 * If our priority decreases while running, we
		 * may need to pull tasks to this runqueue.
		 */
		if (oldprio < p->prio)
			pull_rt_task(rq);
		/*
		 * If there's a higher priority task waiting to run
		 * then reschedule. Note, the above pull_rt_task
		 * can release the rq lock and p could migrate.
		 * Only reschedule if p is still on the same runqueue.
		 */
		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
			resched_task(p);
#else
		/* For UP simply resched on drop of prio */
		if (oldprio < p->prio)
			resched_task(p);
#endif /* CONFIG_SMP */
	} else {
		/*
		 * This task is not running, but if it is
		 * greater than the current running task
		 * then reschedule.
		 */
		if (p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}

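/*
 * RLIMIT_RTTIME enforcement: p->rt.timeout counts scheduler ticks of RT
 * execution; once it exceeds the soft limit (converted from us to ticks),
 * cputime_expires.sched_exp is armed so that, presumably via the posix
 * cpu-timer expiry path, the offending task receives its signal.
 */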
static void watchdog(struct rq *rq, struct task_struct *p)
{
	unsigned long soft, hard;

	/* max may change after cur was read, this will be fixed next tick */
	soft = task_rlimit(p, RLIMIT_RTTIME);
	hard = task_rlimit_max(p, RLIMIT_RTTIME);

	if (soft != RLIM_INFINITY) {
		unsigned long next;

		if (p->rt.watchdog_stamp != jiffies) {
			p->rt.timeout++;
			p->rt.watchdog_stamp = jiffies;
		}

		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
		if (p->rt.timeout > next)
			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
	}
}

static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);

	watchdog(rq, p);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->rt.time_slice)
		return;

	p->rt.time_slice = sched_rr_timeslice;

	/*
	 * Requeue to the end of the queue if we (and all of our ancestors)
	 * are not the only element on the queue
	 */
	for_each_sched_rt_entity(rt_se) {
		if (rt_se->run_list.prev != rt_se->run_list.next) {
			requeue_task_rt(rq, p, 0);
			set_tsk_need_resched(p);
			return;
		}
	}
}

static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq_clock_task(rq);

	/* The running task is never eligible for pushing */
	dequeue_pushable_task(rq, p);
}

static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
{
	/*
	 * Time slice is 0 for SCHED_FIFO tasks
	 */
	if (task->policy == SCHED_RR)
		return sched_rr_timeslice;
	else
		return 0;
}

const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_rt,

	.set_cpus_allowed       = set_cpus_allowed_rt,
	.rq_online              = rq_online_rt,
	.rq_offline             = rq_offline_rt,
	.post_schedule		= post_schedule_rt,
	.task_woken		= task_woken_rt,
	.switched_from		= switched_from_rt,
#endif

	.set_curr_task          = set_curr_task_rt,
	.task_tick		= task_tick_rt,

	.get_rr_interval	= get_rr_interval_rt,

	.prio_changed		= prio_changed_rt,
	.switched_to		= switched_to_rt,
};

#ifdef CONFIG_SCHED_DEBUG
extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);

void print_rt_stats(struct seq_file *m, int cpu)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	rcu_read_lock();
	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
		print_rt_rq(m, cpu, rt_rq);
	rcu_read_unlock();
}
#endif /* CONFIG_SCHED_DEBUG */