/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#include "sched.h"

#include <linux/slab.h>

int sched_rr_timeslice = RR_TIMESLICE;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

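/*
 * RT bandwidth period timer: on each expiry, forward the hrtimer past any
 * missed periods and let do_sched_rt_period_timer() replenish per-runqueue
 * runtime; stop the timer once every runqueue reports it is idle.
 */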
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	ktime_t now;
	int overrun;
	int idle = 0;

	for (;;) {
		now = hrtimer_cb_get_time(timer);
		overrun = hrtimer_forward(timer, now, rt_b->rt_period);

		if (!overrun)
			break;

		idle = do_sched_rt_period_timer(rt_b, overrun);
	}

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer,
			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}

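/*
 * Arm the bandwidth period timer, unless RT throttling is disabled, the
 * runtime is unlimited, or the timer is already running.
 */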
static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	if (hrtimer_active(&rt_b->rt_period_timer))
		return;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
{
	struct rt_prio_array *array;
	int i;

	array = &rt_rq->active;
	for (i = 0; i < MAX_RT_PRIO; i++) {
		INIT_LIST_HEAD(array->queue + i);
		__clear_bit(i, array->bitmap);
	}
	/* delimiter for bitsearch: */
	__set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->highest_prio.next = MAX_RT_PRIO;
	rt_rq->rt_nr_migratory = 0;
	rt_rq->overloaded = 0;
	plist_head_init(&rt_rq->pushable_tasks);
#endif
	/* We start in dequeued state, because no RT tasks are queued */
	rt_rq->rt_queued = 0;

	rt_rq->rt_time = 0;
	rt_rq->rt_throttled = 0;
	rt_rq->rt_runtime = 0;
	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	hrtimer_cancel(&rt_b->rt_period_timer);
}

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_se->rt_rq;

	return rt_rq->rq;
}

void free_rt_sched_group(struct task_group *tg)
{
	int i;

	if (tg->rt_se)
		destroy_rt_bandwidth(&tg->rt_bandwidth);

	for_each_possible_cpu(i) {
		if (tg->rt_rq)
			kfree(tg->rt_rq[i]);
		if (tg->rt_se)
			kfree(tg->rt_se[i]);
	}

	kfree(tg->rt_rq);
	kfree(tg->rt_se);
}

void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->rt_nr_boosted = 0;
	rt_rq->rq = rq;
	rt_rq->tg = tg;

	tg->rt_rq[cpu] = rt_rq;
	tg->rt_se[cpu] = rt_se;

	if (!rt_se)
		return;

	if (!parent)
		rt_se->rt_rq = &rq->rt;
	else
		rt_se->rt_rq = parent->my_q;

	rt_se->my_q = rt_rq;
	rt_se->parent = parent;
	INIT_LIST_HEAD(&rt_se->run_list);
}

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct rt_rq *rt_rq;
	struct sched_rt_entity *rt_se;
	int i;

	tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->rt_rq)
		goto err;
	tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->rt_se)
		goto err;

	init_rt_bandwidth(&tg->rt_bandwidth,
			ktime_to_ns(def_rt_bandwidth.rt_period), 0);

	for_each_possible_cpu(i) {
		rt_rq = kzalloc_node(sizeof(struct rt_rq),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_rq)
			goto err;

		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_se)
			goto err_free_rq;

		init_rt_rq(rt_rq, cpu_rq(i));
		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
	}

	return 1;

err_free_rq:
	kfree(rt_rq);
err:
	return 0;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);

	return task_rq(p);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct rq *rq = rq_of_rt_se(rt_se);

	return &rq->rt;
}

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static int pull_rt_task(struct rq *this_rq);

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	return rq->rt.highest_prio.curr > prev->prio;
}

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 *
	 * Matched by the barrier in pull_rt_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

static void update_rt_migration(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;
		}
	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;
	}
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
304 305
	struct task_struct *p;

306 307 308
	if (!rt_entity_is_task(rt_se))
		return;

309
	p = rt_task_of(rt_se);
310 311 312
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total++;
313
	if (p->nr_cpus_allowed > 1)
314 315 316 317 318 319 320
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
321 322
	struct task_struct *p;

323 324 325
	if (!rt_entity_is_task(rt_se))
		return;

326
	p = rt_task_of(rt_se);
327 328 329
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total--;
330
	if (p->nr_cpus_allowed > 1)
331 332 333 334 335
		rt_rq->rt_nr_migratory--;

	update_rt_migration(rt_rq);
}

336 337 338 339 340
static inline int has_pushable_tasks(struct rq *rq)
{
	return !plist_head_empty(&rq->rt.pushable_tasks);
}

static inline void set_post_schedule(struct rq *rq)
{
	/*
	 * We detect this state here so that we can avoid taking the RQ
	 * lock again later if there is no need to push
	 */
	rq->post_schedule = has_pushable_tasks(rq);
}

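/*
 * Maintain the per-rq plist of tasks that may be pushed to another CPU,
 * keyed by priority, and cache the next-highest pushable priority for the
 * overload/push logic.
 */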
static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
355 356 357 358

	/* Update the highest prio pushable task */
	if (p->prio < rq->rt.highest_prio.next)
		rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the new highest prio pushable task */
	if (has_pushable_tasks(rq)) {
		p = plist_first_entry(&rq->rt.pushable_tasks,
				      struct task_struct, pushable_tasks);
		rq->rt.highest_prio.next = p->prio;
	} else
		rq->rt.highest_prio.next = MAX_RT_PRIO;
372 373
}

374 375
#else

376
static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

380 381 382 383
static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

384
static inline
385 386 387 388
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

389
static inline
390 391 392
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}
393

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline int pull_rt_task(struct rq *this_rq)
{
	return 0;
}

static inline void set_post_schedule(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

409 410 411
static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
static void dequeue_top_rt_rq(struct rt_rq *rt_rq);

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return !list_empty(&rt_se->run_list);
}

417
#ifdef CONFIG_RT_GROUP_SCHED
P
Peter Zijlstra 已提交
418

P
Peter Zijlstra 已提交
419
static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
P
Peter Zijlstra 已提交
420 421
{
	if (!rt_rq->tg)
P
Peter Zijlstra 已提交
422
		return RUNTIME_INF;
P
Peter Zijlstra 已提交
423

P
Peter Zijlstra 已提交
424 425 426 427 428 429
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
P
Peter Zijlstra 已提交
430 431
}

C
Cheng Xu 已提交
432 433
typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
	do {
		tg = list_entry_rcu(tg->list.next,
			typeof(struct task_group), list);
	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));

	if (&tg->list == &task_groups)
		tg = NULL;

	return tg;
}

#define for_each_rt_rq(rt_rq, iter, rq)					\
	for (iter = container_of(&task_groups, typeof(*iter), list);	\
		(iter = next_task_group(iter)) &&			\
		(rt_rq = iter->rt_rq[cpu_of(rq)]);)
C
Cheng Xu 已提交
451

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

460
static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
P
Peter Zijlstra 已提交
461 462
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

P
Peter Zijlstra 已提交
463
static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
P
Peter Zijlstra 已提交
464
{
465
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
466
	struct rq *rq = rq_of_rt_rq(rt_rq);
467 468
	struct sched_rt_entity *rt_se;

469
	int cpu = cpu_of(rq);
470 471

	rt_se = rt_rq->tg->rt_se[cpu];
P
Peter Zijlstra 已提交
472

473
	if (rt_rq->rt_nr_running) {
474 475 476
		if (!rt_se)
			enqueue_top_rt_rq(rt_rq);
		else if (!on_rt_rq(rt_se))
477
			enqueue_rt_entity(rt_se, false);
478

479
		if (rt_rq->highest_prio.curr < curr->prio)
480
			resched_curr(rq);
P
Peter Zijlstra 已提交
481 482 483
	}
}

P
Peter Zijlstra 已提交
484
static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
P
Peter Zijlstra 已提交
485
{
486
	struct sched_rt_entity *rt_se;
487
	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
488

489
	rt_se = rt_rq->tg->rt_se[cpu];
P
Peter Zijlstra 已提交
490

491 492 493
	if (!rt_se)
		dequeue_top_rt_rq(rt_rq);
	else if (on_rt_rq(rt_se))
P
Peter Zijlstra 已提交
494 495 496
		dequeue_rt_entity(rt_se);
}

497 498 499 500 501
static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}

514
#ifdef CONFIG_SMP
515
static inline const struct cpumask *sched_rt_period_mask(void)
516
{
N
Nathan Zimmer 已提交
517
	return this_rq()->rd->span;
518
}
P
Peter Zijlstra 已提交
519
#else
520
static inline const struct cpumask *sched_rt_period_mask(void)
521
{
522
	return cpu_online_mask;
523 524
}
#endif
P
Peter Zijlstra 已提交
525

526 527
static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
P
Peter Zijlstra 已提交
528
{
529 530
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}
P
Peter Zijlstra 已提交
531

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

537
#else /* !CONFIG_RT_GROUP_SCHED */
538 539 540

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
P
Peter Zijlstra 已提交
547 548
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

P
Peter Zijlstra 已提交
562
static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
P
Peter Zijlstra 已提交
563
{
564 565 566 567 568 569
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (!rt_rq->rt_nr_running)
		return;

	enqueue_top_rt_rq(rt_rq);
570
	resched_curr(rq);
P
Peter Zijlstra 已提交
571 572
}

P
Peter Zijlstra 已提交
573
static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
P
Peter Zijlstra 已提交
574
{
575
	dequeue_top_rt_rq(rt_rq);
P
Peter Zijlstra 已提交
576 577
}

578 579 580 581 582
static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

583
static inline const struct cpumask *sched_rt_period_mask(void)
584
{
585
	return cpu_online_mask;
586 587 588 589 590 591 592 593
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

599
#endif /* CONFIG_RT_GROUP_SCHED */
600

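/*
 * Report whether this rt_rq's runtime is still being accounted: either the
 * period timer is active or the accrued rt_time is below the runtime budget.
 */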
bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

	return (hrtimer_active(&rt_b->rt_period_timer) ||
		rt_rq->rt_time < rt_b->rt_runtime);
}

#ifdef CONFIG_SMP
610 611 612
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
613
static int do_balance_runtime(struct rt_rq *rt_rq)
P
Peter Zijlstra 已提交
614 615
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
616
	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
P
Peter Zijlstra 已提交
617 618 619
	int i, weight, more = 0;
	u64 rt_period;

620
	weight = cpumask_weight(rd->span);
P
Peter Zijlstra 已提交
621

622
	raw_spin_lock(&rt_b->rt_runtime_lock);
P
Peter Zijlstra 已提交
623
	rt_period = ktime_to_ns(rt_b->rt_period);
624
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

631
		raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

640 641 642 643
		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
P
Peter Zijlstra 已提交
644 645
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
646
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			more = 1;
			if (rt_rq->rt_runtime == rt_period) {
653
				raw_spin_unlock(&iter->rt_runtime_lock);
P
Peter Zijlstra 已提交
654 655 656
				break;
			}
		}
P
Peter Zijlstra 已提交
657
next:
658
		raw_spin_unlock(&iter->rt_runtime_lock);
P
Peter Zijlstra 已提交
659
	}
660
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	return more;
}

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
C
Cheng Xu 已提交
671
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

C
Cheng Xu 已提交
677
	for_each_rt_rq(rt_rq, iter, rq) {
P
Peter Zijlstra 已提交
678 679 680 681
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

682 683
		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
				rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
692
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
P
Peter Zijlstra 已提交
693

		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have, that's the amount of runtime
		 * we lent and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

701 702 703
		/*
		 * Greedy reclaim, take back as much as we can.
		 */
704
		for_each_cpu(i, rd->span) {
P
Peter Zijlstra 已提交
705 706 707
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

708 709 710
			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
711
			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
P
Peter Zijlstra 已提交
712 713
				continue;

714
			raw_spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
723
			raw_spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

729
		raw_spin_lock(&rt_rq->rt_runtime_lock);
730 731 732 733
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
P
Peter Zijlstra 已提交
734 735
		BUG_ON(want);
balanced:
736 737 738 739
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
P
Peter Zijlstra 已提交
740
		rt_rq->rt_runtime = RUNTIME_INF;
741
		rt_rq->rt_throttled = 0;
742 743
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
744 745 746

		/* Make rt_rq available for pick_next_task() */
		sched_rt_rq_enqueue(rt_rq);
	}
}

static void __enable_runtime(struct rq *rq)
{
C
Cheng Xu 已提交
752
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

758 759 760
	/*
	 * Reset each runqueue's bandwidth settings
	 */
C
Cheng Xu 已提交
761
	for_each_rt_rq(rt_rq, iter, rq) {
P
Peter Zijlstra 已提交
762 763
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

764 765
		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
P
Peter Zijlstra 已提交
766 767
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
768
		rt_rq->rt_throttled = 0;
769 770
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

774 775 776 777
static int balance_runtime(struct rt_rq *rt_rq)
{
	int more = 0;

778 779 780
	if (!sched_feat(RT_RUNTIME_SHARE))
		return more;

781
	if (rt_rq->rt_time > rt_rq->rt_runtime) {
782
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
783
		more = do_balance_runtime(rt_rq);
784
		raw_spin_lock(&rt_rq->rt_runtime_lock);
785 786 787 788
	}

	return more;
}
789
#else /* !CONFIG_SMP */
790 791 792 793
static inline int balance_runtime(struct rt_rq *rt_rq)
{
	return 0;
}
794
#endif /* CONFIG_SMP */

796 797
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
798
	int i, idle = 1, throttled = 0;
799
	const struct cpumask *span;
800 801

	span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * FIXME: isolated CPUs should really leave the root task group,
	 * whether they are isolcpus or were isolated via cpusets, lest
	 * the timer run on a CPU which does not service all runqueues,
	 * potentially leaving other CPUs indefinitely throttled.  If
	 * isolation is really required, the user will turn the throttle
	 * off to kill the perturbations it causes anyway.  Meanwhile,
	 * this maintains functionality for boot and/or troubleshooting.
	 */
	if (rt_b == &root_task_group.rt_bandwidth)
		span = cpu_online_mask;
#endif
815
	for_each_cpu(i, span) {
816 817 818 819
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);

820
		raw_spin_lock(&rq->lock);
821 822 823
		if (rt_rq->rt_time) {
			u64 runtime;

824
			raw_spin_lock(&rt_rq->rt_runtime_lock);
825 826 827 828 829 830 831
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;
832 833

				/*
				 * When we're idle and a woken (rt) task is
				 * throttled check_preempt_curr() will set
				 * skip_update and the time between the wakeup
				 * and this unthrottle will get accounted as
				 * 'runtime'.
839 840
				 */
				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
841
					rq_clock_skip_update(rq, false);
842 843 844
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
845
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
846
		} else if (rt_rq->rt_nr_running) {
847
			idle = 0;
848 849 850
			if (!rt_rq_throttled(rt_rq))
				enqueue = 1;
		}
851 852
		if (rt_rq->rt_throttled)
			throttled = 1;
853 854 855

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
856
		raw_spin_unlock(&rq->lock);
857 858
	}

859 860 861
	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
		return 1;

862 863
	return idle;
}

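/*
 * Effective priority of an entity: a group entity reports the highest
 * priority queued within the group, a task entity reports the task's prio.
 */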
static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
867
#ifdef CONFIG_RT_GROUP_SCHED
P
Peter Zijlstra 已提交
868 869 870
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
871
		return rt_rq->highest_prio.curr;
#endif

	return rt_task_of(rt_se)->prio;
}

P
Peter Zijlstra 已提交
877
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
P
Peter Zijlstra 已提交
878
{
P
Peter Zijlstra 已提交
879
	u64 runtime = sched_rt_runtime(rt_rq);
P
Peter Zijlstra 已提交
880 881

	if (rt_rq->rt_throttled)
P
Peter Zijlstra 已提交
882
		return rt_rq_throttled(rt_rq);
P
Peter Zijlstra 已提交
883

884
	if (runtime >= sched_rt_period(rt_rq))
P
Peter Zijlstra 已提交
885 886
		return 0;

887 888 889 890
	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;
P
Peter Zijlstra 已提交
891

P
Peter Zijlstra 已提交
892
	if (rt_rq->rt_time > runtime) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		/*
		 * Don't actually throttle groups that have no runtime assigned
		 * but accrue some time due to boosting.
		 */
		if (likely(rt_b->rt_runtime)) {
			rt_rq->rt_throttled = 1;
			printk_deferred_once("sched: RT throttling activated\n");
		} else {
			/*
			 * In case we did anyway, make it go away,
			 * replenishment is a joke, since it will replenish us
			 * with exactly 0 ns.
			 */
			rt_rq->rt_time = 0;
		}

P
Peter Zijlstra 已提交
911
		if (rt_rq_throttled(rt_rq)) {
P
Peter Zijlstra 已提交
912
			sched_rt_rq_dequeue(rt_rq);
P
Peter Zijlstra 已提交
913 914
			return 1;
		}
	}

	return 0;
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
I
Ingo Molnar 已提交
925 926
{
	struct task_struct *curr = rq->curr;
P
Peter Zijlstra 已提交
927
	struct sched_rt_entity *rt_se = &curr->rt;
I
Ingo Molnar 已提交
928 929
	u64 delta_exec;

P
Peter Zijlstra 已提交
930
	if (curr->sched_class != &rt_sched_class)
I
Ingo Molnar 已提交
931 932
		return;

933
	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
934 935
	if (unlikely((s64)delta_exec <= 0))
		return;
I
Ingo Molnar 已提交
936

937 938
	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));
I
Ingo Molnar 已提交
939 940

	curr->se.sum_exec_runtime += delta_exec;
941 942
	account_group_exec_runtime(curr, delta_exec);

943
	curr->se.exec_start = rq_clock_task(rq);
944
	cpuacct_charge(curr, delta_exec);
P
Peter Zijlstra 已提交
945

946 947
	sched_rt_avg_update(rq, delta_exec);

948 949 950
	if (!rt_bandwidth_enabled())
		return;

	for_each_sched_rt_entity(rt_se) {
952
		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
D
Dhaval Giani 已提交
953

954
		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
955
			raw_spin_lock(&rt_rq->rt_runtime_lock);
956 957
			rt_rq->rt_time += delta_exec;
			if (sched_rt_runtime_exceeded(rt_rq))
958
				resched_curr(rq);
959
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
960
		}
D
Dhaval Giani 已提交
961
	}
}

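/*
 * Attach/detach the root rt_rq's task count to/from the runqueue's
 * nr_running and track the state in rt_queued; a throttled or empty
 * top-level rt_rq remains dequeued.
 */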
static void
dequeue_top_rt_rq(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	BUG_ON(&rq->rt != rt_rq);

	if (!rt_rq->rt_queued)
		return;

	BUG_ON(!rq->nr_running);

976
	sub_nr_running(rq, rt_rq->rt_nr_running);
	rt_rq->rt_queued = 0;
}

static void
enqueue_top_rt_rq(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	BUG_ON(&rq->rt != rt_rq);

	if (rt_rq->rt_queued)
		return;
	if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
		return;

992
	add_nr_running(rq, rt_rq->rt_nr_running);
993 994 995
	rt_rq->rt_queued = 1;
}

996
#if defined CONFIG_SMP
997

998 999
static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1000
{
G
Gregory Haskins 已提交
1001
	struct rq *rq = rq_of_rt_rq(rt_rq);
1002

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
1010 1011
	if (rq->online && prio < prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1012
}
1013

1014 1015 1016 1017
static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);
1018

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
1026 1027
	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1028 1029
}

1030 1031
#else /* CONFIG_SMP */

P
Peter Zijlstra 已提交
1032
static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */
1038

1039
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (prio < prev_prio)
		rt_rq->highest_prio.curr = prio;

	inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

P
Peter Zijlstra 已提交
1056
	if (rt_rq->rt_nr_running) {
1057

1058
		WARN_ON(prio < prev_prio);
1059

1060
		/*
1061 1062
		 * This may have been our highest task, and therefore
		 * we may have some recomputation to do
1063
		 */
1064
		if (prio == prev_prio) {
1065 1066 1067
			struct rt_prio_array *array = &rt_rq->active;

			rt_rq->highest_prio.curr =
1068
				sched_find_first_bit(array->bitmap);
1069 1070
		}

1071
	} else
1072
		rt_rq->highest_prio.curr = MAX_RT_PRIO;
1073

1074 1075
	dec_rt_prio_smp(rt_rq, prio, prev_prio);
}
1076

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1083

1084
#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	if (rt_rq->tg)
		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */

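/*
 * Contribution of one entity to rt_nr_running: a group entity counts every
 * task queued below it, a task entity counts as one.
 */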
static inline
unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
{
	struct rt_rq *group_rq = group_rt_rq(rt_se);

	if (group_rq)
		return group_rq->rt_nr_running;
	else
		return 1;
}

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	int prio = rt_se_prio(rt_se);

	WARN_ON(!rt_prio(prio));
1135
	rt_rq->rt_nr_running += rt_se_nr_running(rt_se);

	inc_rt_prio(rt_rq, prio);
	inc_rt_migration(rt_se, rt_rq);
	inc_rt_group(rt_se, rt_rq);
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
1147
	rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1148 1149 1150 1151

	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
	dec_rt_migration(rt_se, rt_rq);
	dec_rt_group(rt_se, rt_rq);
1152 1153
}

1154
static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);
1159
	struct list_head *queue = array->queue + rt_se_prio(rt_se);

	/*
	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
		return;
1169

1170 1171 1172 1173
	if (head)
		list_add(&rt_se->run_list, queue);
	else
		list_add_tail(&rt_se->run_list, queue);
P
Peter Zijlstra 已提交
1174
	__set_bit(rt_se_prio(rt_se), array->bitmap);
1175

P
Peter Zijlstra 已提交
1176 1177 1178
	inc_rt_tasks(rt_se, rt_rq);
}

1179
static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	list_del_init(&rt_se->run_list);
	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	dec_rt_tasks(rt_se, rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top - down.
 */
1195
static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
P
Peter Zijlstra 已提交
1196
{
1197
	struct sched_rt_entity *back = NULL;
P
Peter Zijlstra 已提交
1198

1199 1200 1201 1202 1203
	for_each_sched_rt_entity(rt_se) {
		rt_se->back = back;
		back = rt_se;
	}

1204 1205
	dequeue_top_rt_rq(rt_rq_of_se(back));

1206 1207
	for (rt_se = back; rt_se; rt_se = rt_se->back) {
		if (on_rt_rq(rt_se))
1208 1209 1210 1211
			__dequeue_rt_entity(rt_se);
	}
}

1212
static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1213
{
1214 1215
	struct rq *rq = rq_of_rt_se(rt_se);

1216 1217
	dequeue_rt_stack(rt_se);
	for_each_sched_rt_entity(rt_se)
1218
		__enqueue_rt_entity(rt_se, head);
1219
	enqueue_top_rt_rq(&rq->rt);
1220 1221 1222 1223
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
1224 1225
	struct rq *rq = rq_of_rt_se(rt_se);

	dequeue_rt_stack(rt_se);

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = group_rt_rq(rt_se);

		if (rt_rq && rt_rq->rt_nr_running)
1232
			__enqueue_rt_entity(rt_se, false);
1233
	}
1234
	enqueue_top_rt_rq(&rq->rt);
}

/*
 * Adding/removing a task to/from a priority array:
 */
1240
static void
1241
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

1245
	if (flags & ENQUEUE_WAKEUP)
P
Peter Zijlstra 已提交
1246 1247
		rt_se->timeout = 0;

1248
	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
1249

1250
	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1251
		enqueue_pushable_task(rq, p);
P
Peter Zijlstra 已提交
1252 1253
}

1254
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
I
Ingo Molnar 已提交
1255
{
P
Peter Zijlstra 已提交
1256
	struct sched_rt_entity *rt_se = &p->rt;
I
Ingo Molnar 已提交
1257

1258
	update_curr_rt(rq);
1259
	dequeue_rt_entity(rt_se);
1260

1261
	dequeue_pushable_task(rq, p);
}

/*
1265 1266
 * Put task to the head or the end of the run list without the overhead of
 * dequeue followed by enqueue.
I
Ingo Molnar 已提交
1267
 */
1268 1269
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
P
Peter Zijlstra 已提交
1270
{
1271
	if (on_rt_rq(rt_se)) {
1272 1273 1274 1275 1276 1277 1278
		struct rt_prio_array *array = &rt_rq->active;
		struct list_head *queue = array->queue + rt_se_prio(rt_se);

		if (head)
			list_move(&rt_se->run_list, queue);
		else
			list_move_tail(&rt_se->run_list, queue);
1279
	}
P
Peter Zijlstra 已提交
1280 1281
}

1282
static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
I
Ingo Molnar 已提交
1283
{
P
Peter Zijlstra 已提交
1284 1285
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;
I
Ingo Molnar 已提交
1286

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
1289
		requeue_rt_entity(rt_rq, rt_se, head);
P
Peter Zijlstra 已提交
1290
	}
I
Ingo Molnar 已提交
1291 1292
}

P
Peter Zijlstra 已提交
1293
static void yield_task_rt(struct rq *rq)
I
Ingo Molnar 已提交
1294
{
1295
	requeue_task_rt(rq, rq->curr, 0);
I
Ingo Molnar 已提交
1296 1297
}

1298
#ifdef CONFIG_SMP
1299 1300
static int find_lowest_rq(struct task_struct *task);

1301
static int
1302
select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1303
{
1304 1305
	struct task_struct *curr;
	struct rq *rq;

	/* For anything but wake ups, just return the task_cpu */
	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
		goto out;

	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = ACCESS_ONCE(rq->curr); /* unlocked access */

1316
	/*
1317
	 * If the current task on @p's runqueue is an RT task, then
1318 1319 1320 1321
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues. If the woken
	 * task is a higher priority, then it will stay on this CPU
	 * and the lower prio task should be moved to another CPU.
	 * Even though this will probably make the lower prio task
	 * lose its cache, we do not want to bounce a higher task
	 * around just because it gave up its CPU, perhaps for a
	 * lock?
	 *
	 * For equal prio tasks, we just let the scheduler sort it out.
	 *
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away
	 *
	 * This test is optimistic, if we get it wrong the load-balancer
	 * will have to sort it out.
1337
	 */
1338
	if (curr && unlikely(rt_task(curr)) &&
1339
	    (curr->nr_cpus_allowed < 2 ||
1340
	     curr->prio <= p->prio)) {
1341
		int target = find_lowest_rq(p);
1342

		/*
		 * Don't bother moving it if the destination CPU is
		 * not running a lower priority task.
		 */
		if (target != -1 &&
		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
1349
			cpu = target;
1350
	}
1351
	rcu_read_unlock();
1352

1353
out:
1354
	return cpu;
1355
}
1356 1357 1358

static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
	/*
	 * Current can't be migrated, useless to reschedule,
	 * let's hope p can move out.
	 */
	if (rq->curr->nr_cpus_allowed == 1 ||
	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1365 1366
		return;

1367 1368 1369 1370
	/*
	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.
	 */
1371
	if (p->nr_cpus_allowed != 1
1372 1373
	    && cpupri_find(&rq->rd->cpupri, p, NULL))
		return;
1374

	/*
	 * There appear to be other cpus that can accept
	 * current and none to run 'p', so let's reschedule
	 * to try and push current away:
	 */
	requeue_task_rt(rq, p, 1);
1381
	resched_curr(rq);
1382 1383
}

1384 1385
#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
P
Peter Zijlstra 已提交
1389
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
I
Ingo Molnar 已提交
1390
{
1391
	if (p->prio < rq->curr->prio) {
1392
		resched_curr(rq);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * If:
	 *
	 * - the newly woken task is of equal priority to the current task
	 * - the newly woken task is non-migratable while current is migratable
	 * - current will be preempted on the next reschedule
	 *
	 * we should check to see if current can readily move to a different
	 * cpu.  If so, we will reschedule to allow the push logic to try
	 * to move current somewhere else, making room for our non-migratable
	 * task.
	 */
1409
	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1410
		check_preempt_equal_prio(rq, p);
1411
#endif
I
Ingo Molnar 已提交
1412 1413
}

P
Peter Zijlstra 已提交
1414 1415
static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq)
I
Ingo Molnar 已提交
1416
{
P
Peter Zijlstra 已提交
1417 1418
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *next = NULL;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
P
Peter Zijlstra 已提交
1423
	BUG_ON(idx >= MAX_RT_PRIO);
I
Ingo Molnar 已提交
1424 1425

	queue = array->queue + idx;
P
Peter Zijlstra 已提交
1426
	next = list_entry(queue->next, struct sched_rt_entity, run_list);
1427

P
Peter Zijlstra 已提交
1428 1429
	return next;
}
I
Ingo Molnar 已提交
1430

1431
static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
	struct sched_rt_entity *rt_se;
	struct task_struct *p;
1435
	struct rt_rq *rt_rq  = &rq->rt;
P
Peter Zijlstra 已提交
1436 1437 1438

	do {
		rt_se = pick_next_rt_entity(rq, rt_rq);
1439
		BUG_ON(!rt_se);
		rt_rq = group_rt_rq(rt_se);
	} while (rt_rq);

	p = rt_task_of(rt_se);
1444
	p->se.exec_start = rq_clock_task(rq);
1445 1446 1447 1448

	return p;
}

1449 1450
static struct task_struct *
pick_next_task_rt(struct rq *rq, struct task_struct *prev)
1451
{
1452 1453 1454
	struct task_struct *p;
	struct rt_rq *rt_rq = &rq->rt;

1455
	if (need_pull_rt_task(rq, prev)) {
1456
		pull_rt_task(rq);
1457 1458
		/*
		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
1459 1460
		 * means a dl or stop task can slip in, in which case we need
		 * to re-start task selection.
1461
		 */
1462
		if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
1463
			     rq->dl.dl_nr_running))
1464 1465
			return RETRY_TASK;
	}
1466

	/*
	 * We may dequeue prev's rt_rq in put_prev_task().
	 * So, we update time before rt_nr_running check.
	 */
	if (prev->sched_class == &rt_sched_class)
		update_curr_rt(rq);

1474
	if (!rt_rq->rt_queued)
1475 1476
		return NULL;

1477
	put_prev_task(rq, prev);
1478 1479

	p = _pick_next_task_rt(rq);
1480 1481

	/* The running task is never eligible for pushing */
1482
	dequeue_pushable_task(rq, p);
1483

P
Peter Zijlstra 已提交
1484
	set_post_schedule(rq);
1485

P
Peter Zijlstra 已提交
1486
	return p;
I
Ingo Molnar 已提交
1487 1488
}

1489
static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
I
Ingo Molnar 已提交
1490
{
1491
	update_curr_rt(rq);

	/*
	 * The previous task needs to be made eligible for pushing
	 * if it is still active
	 */
1497
	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1498
		enqueue_pushable_task(rq, p);
I
Ingo Molnar 已提交
1499 1500
}

1501
#ifdef CONFIG_SMP
P
Peter Zijlstra 已提交
1502

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

1506 1507 1508
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
1509
	    cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1510 1511 1512 1513
		return 1;
	return 0;
}

/*
 * Return the highest pushable rq's task, which is suitable to be executed
 * on the cpu, NULL otherwise
 */
static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
S
Steven Rostedt 已提交
1519
{
1520 1521
	struct plist_head *head = &rq->rt.pushable_tasks;
	struct task_struct *p;
1522

1523 1524
	if (!has_pushable_tasks(rq))
		return NULL;
1525

1526 1527 1528
	plist_for_each_entry(p, head, pushable_tasks) {
		if (pick_rt_task(rq, p, cpu))
			return p;
1529 1530
	}

1531
	return NULL;
S
Steven Rostedt 已提交
1532 1533
}

1534
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
S
Steven Rostedt 已提交
1535

static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
1539
	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
G
Gregory Haskins 已提交
1540 1541
	int this_cpu = smp_processor_id();
	int cpu      = task_cpu(task);
G
Gregory Haskins 已提交
1542

1543 1544 1545 1546
	/* Make sure the mask is initialized first */
	if (unlikely(!lowest_mask))
		return -1;

1547
	if (task->nr_cpus_allowed == 1)
1548
		return -1; /* No other targets possible */
G
Gregory Haskins 已提交
1549

1550 1551
	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
		return -1; /* No targets found */

	/*
	 * At this point we have built a mask of cpus representing the
	 * lowest priority tasks in the system.  Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last cpu that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
1561
	if (cpumask_test_cpu(cpu, lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which cpu is logically closest to our hot cache data.
	 */
R
Rusty Russell 已提交
1568 1569
	if (!cpumask_test_cpu(this_cpu, lowest_mask))
		this_cpu = -1; /* Skip this_cpu opt if not among lowest */
G
Gregory Haskins 已提交
1570

1571
	rcu_read_lock();
R
Rusty Russell 已提交
1572 1573 1574
	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			int best_cpu;
G
Gregory Haskins 已提交
1575

			/*
			 * "this_cpu" is cheaper to preempt than a
			 * remote processor.
			 */
			if (this_cpu != -1 &&
1581 1582
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
R
Rusty Russell 已提交
1583
				return this_cpu;
1584
			}
R
Rusty Russell 已提交
1585 1586 1587

			best_cpu = cpumask_first_and(lowest_mask,
						     sched_domain_span(sd));
1588 1589
			if (best_cpu < nr_cpu_ids) {
				rcu_read_unlock();
R
Rusty Russell 已提交
1590
				return best_cpu;
1591
			}
G
Gregory Haskins 已提交
1592 1593
		}
	}
1594
	rcu_read_unlock();

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible
	 * locations.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any(lowest_mask);
	if (cpu < nr_cpu_ids)
		return cpu;
	return -1;
1608 1609 1610
}

/* Will lock the rq it finds */
1611
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1612 1613 1614
{
	struct rq *lowest_rq = NULL;
	int tries;
1615
	int cpu;
S
Steven Rostedt 已提交
1616

1617 1618 1619
	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

1620
		if ((cpu == -1) || (cpu == rq->cpu))
S
Steven Rostedt 已提交
1621 1622
			break;

1623 1624
		lowest_rq = cpu_rq(cpu);

		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
			/*
			 * Target rq has tasks of equal or higher priority,
			 * retrying does not release any lock and is unlikely
			 * to yield a different result.
			 */
			lowest_rq = NULL;
			break;
		}

		/* if the prio of this runqueue changed, try again */
1636
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the meantime, task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
1643
			if (unlikely(task_rq(task) != rq ||
1644
				     !cpumask_test_cpu(lowest_rq->cpu,
1645
						       tsk_cpus_allowed(task)) ||
1646
				     task_running(rq, task) ||
1647
				     !task_on_rq_queued(task))) {
1648

1649
				double_unlock_balance(rq, lowest_rq);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
1656
		if (lowest_rq->rt.highest_prio.curr > task->prio)
S
Steven Rostedt 已提交
1657 1658 1659
			break;

		/* try again */
1660
		double_unlock_balance(rq, lowest_rq);
		lowest_rq = NULL;
	}

	return lowest_rq;
}

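/*
 * Return the highest-priority task on this rq that is eligible for pushing:
 * queued, not currently running here, and allowed on more than one CPU.
 */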
static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_tasks(rq))
		return NULL;

	p = plist_first_entry(&rq->rt.pushable_tasks,
			      struct task_struct, pushable_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
1679
	BUG_ON(p->nr_cpus_allowed <= 1);
1680

1681
	BUG_ON(!task_on_rq_queued(p));
1682 1683 1684 1685 1686
	BUG_ON(!rt_task(p));

	return p;
}

/*
 * If the current CPU has more than one RT task, see if the non
 * running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
1692
static int push_rt_task(struct rq *rq)
S
Steven Rostedt 已提交
1693 1694 1695
{
	struct task_struct *next_task;
	struct rq *lowest_rq;
1696
	int ret = 0;
S
Steven Rostedt 已提交
1697

G
Gregory Haskins 已提交
1698 1699 1700
	if (!rq->rt.overloaded)
		return 0;

1701
	next_task = pick_next_pushable_task(rq);
S
Steven Rostedt 已提交
1702 1703 1704
	if (!next_task)
		return 0;

P
Peter Zijlstra 已提交
1705
retry:
1706
	if (unlikely(next_task == rq->curr)) {
1707
		WARN_ON(1);
S
Steven Rostedt 已提交
1708
		return 0;
1709
	}

	/*
	 * It's possible that the next_task slipped in of
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
1716
	if (unlikely(next_task->prio < rq->curr->prio)) {
1717
		resched_curr(rq);
S
Steven Rostedt 已提交
1718 1719 1720
		return 0;
	}

1721
	/* We might release rq lock */
S
Steven Rostedt 已提交
1722 1723 1724
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
1725
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
1729
		 * find_lock_lowest_rq releases rq->lock
1730 1731 1732 1733 1734
		 * so it is possible that next_task has migrated.
		 *
		 * We need to make sure that the task is still on the same
		 * run-queue and is also still the next task eligible for
		 * pushing.
S
Steven Rostedt 已提交
1735
		 */
1736
		task = pick_next_pushable_task(rq);
1737 1738
		if (task_cpu(next_task) == rq->cpu && task == next_task) {
			/*
			 * The task hasn't migrated, and is still the next
			 * eligible task, but we failed to find a run-queue
			 * to push it to.  Do not retry in this case, since
			 * other cpus will pull from us when ready.
1743 1744
			 */
			goto out;
S
Steven Rostedt 已提交
1745
		}
1746

1747 1748 1749 1750
		if (!task)
			/* No more tasks, just exit */
			goto out;

1751
		/*
1752
		 * Something has shifted, try again.
1753
		 */
1754 1755 1756
		put_task_struct(next_task);
		next_task = task;
		goto retry;
S
Steven Rostedt 已提交
1757 1758
	}

1759
	deactivate_task(rq, next_task, 0);
S
Steven Rostedt 已提交
1760 1761
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);
1762
	ret = 1;
S
Steven Rostedt 已提交
1763

1764
	resched_curr(lowest_rq);
S
Steven Rostedt 已提交
1765

1766
	double_unlock_balance(rq, lowest_rq);

out:
	put_task_struct(next_task);

1771
	return ret;
}

static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task will return true if it moved an RT */
	while (push_rt_task(rq))
		;
}

1781 1782
static int pull_rt_task(struct rq *this_rq)
{
I
Ingo Molnar 已提交
1783
	int this_cpu = this_rq->cpu, ret = 0, cpu;
1784
	struct task_struct *p;
1785 1786
	struct rq *src_rq;

1787
	if (likely(!rt_overloaded(this_rq)))
1788 1789
		return 0;

	/*
	 * Match the barrier from rt_set_overload(); this guarantees that if we
	 * see overloaded we must also see the rto_mask bit.
	 */
	smp_rmb();

1796
	for_each_cpu(cpu, this_rq->rd->rto_mask) {
1797 1798 1799 1800
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);
1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812

		/*
		 * Don't bother taking the src_rq->lock if the next highest
		 * task is known to be lower-priority than our current task.
		 * This may look racy, but if this value is about to go
		 * logically higher, the src_rq will push this task away.
		 * And if its going logically lower, we do not care
		 */
		if (src_rq->rt.highest_prio.next >=
		    this_rq->rt.highest_prio.curr)
			continue;

1813 1814 1815
		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
1816
		 * alter this_rq
1817
		 */
1818
		double_lock_balance(this_rq, src_rq);
1819 1820

		/*
1821 1822
		 * We can pull only a task, which is pushable
		 * on its rq, and no others.
1823
		 */
1824
		p = pick_highest_pushable_task(src_rq, this_cpu);
1825 1826 1827 1828 1829

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
1830
		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1831
			WARN_ON(p == src_rq->curr);
1832
			WARN_ON(!task_on_rq_queued(p));
1833 1834 1835 1836 1837 1838 1839

			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just that p is wakeing up and hasn't
			 * had a chance to schedule. We only pull
			 * p if it is lower in priority than the
1840
			 * current task on the run queue
1841
			 */
1842
			if (p->prio < src_rq->curr->prio)
M
Mike Galbraith 已提交
1843
				goto skip;
1844 1845 1846 1847 1848 1849 1850 1851 1852

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
L
1854 1855 1856
			 * but possible)
			 */
		}
P
Peter Zijlstra 已提交
1857
skip:
1858
		double_unlock_balance(this_rq, src_rq);
1859 1860 1861 1862 1863
	}

	return ret;
}

1864
static void post_schedule_rt(struct rq *rq)
S
Steven Rostedt 已提交
1865
{
1866
	push_rt_tasks(rq);
S
Steven Rostedt 已提交
1867 1868
}

/*
 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now
 */
1873
static void task_woken_rt(struct rq *rq, struct task_struct *p)
1874
{
1875
	if (!task_running(rq, p) &&
1876
	    !test_tsk_need_resched(rq->curr) &&
1877
	    has_pushable_tasks(rq) &&
1878
	    p->nr_cpus_allowed > 1 &&
1879
	    (dl_task(rq->curr) || rt_task(rq->curr)) &&
1880
	    (rq->curr->nr_cpus_allowed < 2 ||
1881
	     rq->curr->prio <= p->prio))
1882 1883 1884
		push_rt_tasks(rq);
}

static void set_cpus_allowed_rt(struct task_struct *p,
				const struct cpumask *new_mask)
{
	struct rq *rq;
	int weight;

	BUG_ON(!rt_task(p));

	if (!task_on_rq_queued(p))
		return;

	weight = cpumask_weight(new_mask);

	/*
	 * Only update if the process changes whether it can migrate or not.
	 */
	if ((p->nr_cpus_allowed > 1) == (weight > 1))
		return;

	rq = task_rq(p);

	/*
	 * The process used to be able to migrate OR it can now migrate
	 */
	if (weight <= 1) {
		if (!task_current(rq, p))
			dequeue_pushable_task(rq, p);
		BUG_ON(!rq->rt.rt_nr_migratory);
		rq->rt.rt_nr_migratory--;
	} else {
		if (!task_current(rq, p))
			enqueue_pushable_task(rq, p);
		rq->rt.rt_nr_migratory++;
	}

	update_rt_migration(&rq->rt);
}

/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_set_overload(rq);

	__enable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
}

/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_clear_overload(rq);

	__disable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}

/*
 * When switching from the rt queue, we bring ourselves to a position
 * that we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p)
{
	/*
	 * If there are other RT tasks then we will reschedule
	 * and the scheduling of the other RT tasks will handle
	 * the balancing. But if we are the last RT task
	 * we may need to handle the pulling of RT tasks
	 * now.
	 */
	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
		return;

	if (pull_rt_task(rq))
		resched_curr(rq);
}
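
/*
 * Allocate the per-CPU cpumasks used by the RT push/pull (balancing) code.
 */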
void __init init_sched_rt_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
					GFP_KERNEL, cpu_to_node(i));
	}
}
#endif /* CONFIG_SMP */

/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p)
{
	int check_resched = 1;

	/*
	 * If we are already running, then there's nothing
	 * that needs to be done. But if we are not running
	 * we may need to preempt the current running task.
	 * If that current running task is also an RT task
	 * then see if we can move to another run queue.
	 */
	if (task_on_rq_queued(p) && rq->curr != p) {
#ifdef CONFIG_SMP
		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded &&
		    /* Don't resched if we changed runqueues */
		    push_rt_task(rq) && rq != task_rq(p))
			check_resched = 0;
#endif /* CONFIG_SMP */
		if (check_resched && p->prio < rq->curr->prio)
			resched_curr(rq);
	}
}

/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void
prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
{
	if (!task_on_rq_queued(p))
		return;

	if (rq->curr == p) {
#ifdef CONFIG_SMP
		/*
		 * If our priority decreases while running, we
		 * may need to pull tasks to this runqueue.
		 */
		if (oldprio < p->prio)
			pull_rt_task(rq);
		/*
		 * If there's a higher priority task waiting to run
		 * then reschedule. Note, the above pull_rt_task
		 * can release the rq lock and p could migrate.
		 * Only reschedule if p is still on the same runqueue.
		 */
		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
			resched_curr(rq);
#else
		/* For UP simply resched on drop of prio */
		if (oldprio < p->prio)
			resched_curr(rq);
#endif /* CONFIG_SMP */
	} else {
		/*
		 * This task is not running, but if it is
		 * greater than the current running task
		 * then reschedule.
		 */
		if (p->prio < rq->curr->prio)
			resched_curr(rq);
	}
}

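/*
 * Per-tick RLIMIT_RTTIME policing: count the ticks this task has spent
 * running as an RT task and, once the soft limit is reached, arm the task's
 * cputime expiry.
 */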
static void watchdog(struct rq *rq, struct task_struct *p)
{
	unsigned long soft, hard;

	/* max may change after cur was read, this will be fixed next tick */
	soft = task_rlimit(p, RLIMIT_RTTIME);
	hard = task_rlimit_max(p, RLIMIT_RTTIME);

	if (soft != RLIM_INFINITY) {
		unsigned long next;

		if (p->rt.watchdog_stamp != jiffies) {
			p->rt.timeout++;
			p->rt.watchdog_stamp = jiffies;
		}

		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
		if (p->rt.timeout > next)
			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
	}
}
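
/*
 * Scheduler tick for an RT task: update runtime accounting, run the
 * RLIMIT_RTTIME watchdog and handle SCHED_RR round-robin timeslices.
 */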
static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);

	watchdog(rq, p);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->rt.time_slice)
		return;

	p->rt.time_slice = sched_rr_timeslice;

	/*
	 * Requeue to the end of queue if we (and all of our ancestors) are not
	 * the only element on the queue
	 */
	for_each_sched_rt_entity(rt_se) {
		if (rt_se->run_list.prev != rt_se->run_list.next) {
			requeue_task_rt(rq, p, 0);
			resched_curr(rq);
			return;
		}
	}
}

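/*
 * p is (re)starting as rq->curr in the RT class: reset its exec_start
 * timestamp and drop it from the pushable list.
 */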
static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq_clock_task(rq);

	/* The running task is never eligible for pushing */
	dequeue_pushable_task(rq, p);
}

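/*
 * Report the timeslice of @task: the RR timeslice for SCHED_RR, 0 for
 * SCHED_FIFO.
 */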
static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
{
	/*
	 * Time slice is 0 for SCHED_FIFO tasks
	 */
	if (task->policy == SCHED_RR)
		return sched_rr_timeslice;
	else
		return 0;
}

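/*
 * Method table that hooks the SCHED_FIFO and SCHED_RR policies into the
 * core scheduler.
 */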
const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_rt,

	.set_cpus_allowed       = set_cpus_allowed_rt,
	.rq_online              = rq_online_rt,
	.rq_offline             = rq_offline_rt,
	.post_schedule		= post_schedule_rt,
	.task_woken		= task_woken_rt,
	.switched_from		= switched_from_rt,
#endif

	.set_curr_task          = set_curr_task_rt,
	.task_tick		= task_tick_rt,

	.get_rr_interval	= get_rr_interval_rt,

	.prio_changed		= prio_changed_rt,
	.switched_to		= switched_to_rt,

	.update_curr		= update_curr_rt,
};

#ifdef CONFIG_SCHED_DEBUG
extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);

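/*
 * Dump every rt_rq attached to @cpu into the debug seq_file.
 */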
void print_rt_stats(struct seq_file *m, int cpu)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	rcu_read_lock();
	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
		print_rt_rq(m, cpu, rt_rq);
	rcu_read_unlock();
}
#endif /* CONFIG_SCHED_DEBUG */