/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#ifdef CONFIG_SMP

/*
 * The "RT overload" flag: it gets set if a CPU has more than
 * one runnable RT task.
 */
static cpumask_t rt_overload_mask;
static atomic_t rto_count;

static inline int rt_overloaded(void)
{
	return atomic_read(&rto_count);
}

static inline cpumask_t *rt_overload(void)
{
	return &rt_overload_mask;
}

static inline void rt_set_overload(struct rq *rq)
{
	rq->rt.overloaded = 1;
	cpu_set(rq->cpu, rt_overload_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 */
	wmb();
	atomic_inc(&rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	/* the order here really doesn't matter */
	atomic_dec(&rto_count);
	cpu_clear(rq->cpu, rt_overload_mask);
	rq->rt.overloaded = 0;
}

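/*
 * A runqueue is "RT overloaded" only while it has more than one runnable
 * RT task and at least one of them is allowed to run on another CPU;
 * re-evaluate that state whenever the RT task counts change.
 */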
static void update_rt_migration(struct rq *rq)
{
	if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1))
		rt_set_overload(rq);
	else
		rt_clear_overload(rq);
}
#endif /* CONFIG_SMP */

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	u64 delta_exec;

	if (!task_has_rt_policy(curr))
		return;

	delta_exec = rq->clock - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	curr->se.exec_start = rq->clock;
	cpuacct_charge(curr, delta_exec);
}

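/*
 * Enqueue/dequeue bookkeeping: inc_rt_tasks()/dec_rt_tasks() maintain the
 * count of runnable RT tasks, the highest RT priority queued on this
 * runqueue and, on SMP, the number of tasks that could migrate, which in
 * turn drives the overload state above.
 */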
static inline void inc_rt_tasks(struct task_struct *p, struct rq *rq)
{
	WARN_ON(!rt_task(p));
	rq->rt.rt_nr_running++;
#ifdef CONFIG_SMP
	if (p->prio < rq->rt.highest_prio)
		rq->rt.highest_prio = p->prio;
	if (p->nr_cpus_allowed > 1)
		rq->rt.rt_nr_migratory++;

	update_rt_migration(rq);
#endif /* CONFIG_SMP */
}

static inline void dec_rt_tasks(struct task_struct *p, struct rq *rq)
{
	WARN_ON(!rt_task(p));
	WARN_ON(!rq->rt.rt_nr_running);
	rq->rt.rt_nr_running--;
#ifdef CONFIG_SMP
	if (rq->rt.rt_nr_running) {
		struct rt_prio_array *array;

		WARN_ON(p->prio < rq->rt.highest_prio);
		if (p->prio == rq->rt.highest_prio) {
			/* recalculate */
			array = &rq->rt.active;
			rq->rt.highest_prio =
				sched_find_first_bit(array->bitmap);
		} /* otherwise leave rq->rt.highest_prio alone */
	} else
		rq->rt.highest_prio = MAX_RT_PRIO;
	if (p->nr_cpus_allowed > 1)
		rq->rt.rt_nr_migratory--;

	update_rt_migration(rq);
#endif /* CONFIG_SMP */
}

static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct rt_prio_array *array = &rq->rt.active;

	list_add_tail(&p->run_list, array->queue + p->prio);
	__set_bit(p->prio, array->bitmap);
	inc_cpu_load(rq, p->se.load.weight);

	inc_rt_tasks(p, rq);
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
	struct rt_prio_array *array = &rq->rt.active;

	update_curr_rt(rq);

	list_del(&p->run_list);
	if (list_empty(array->queue + p->prio))
		__clear_bit(p->prio, array->bitmap);
	dec_cpu_load(rq, p->se.load.weight);

	dec_rt_tasks(p, rq);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
	struct rt_prio_array *array = &rq->rt.active;

	list_move_tail(&p->run_list, array->queue + p->prio);
}

static void
yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int select_task_rq_rt(struct task_struct *p, int sync)
{
	struct rq *rq = task_rq(p);

	/*
	 * If the current task is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues, even if
	 * the RT task is of higher priority than the current RT task.
	 * RT tasks behave differently than other tasks. If
	 * one gets preempted, we try to push it off to another queue.
	 * So trying to keep a preempting RT task on the same
	 * cache hot CPU will force the running RT task to
	 * a cold CPU. So we waste all the cache for the lower
	 * RT task in hopes of saving some of an RT task
	 * that is just being woken and probably will have
	 * cold cache anyway.
	 */
	if (unlikely(rt_task(rq->curr)) &&
	    (p->nr_cpus_allowed > 1)) {
		int cpu = find_lowest_rq(p);

		return (cpu == -1) ? task_cpu(p) : cpu;
	}

	/*
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away
	 */
	return task_cpu(p);
}
#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
	if (p->prio < rq->curr->prio)
		resched_task(rq->curr);
}

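/*
 * Pick the next RT task to run: the first set bit in the priority bitmap
 * gives the highest queued priority, and we take the head of that queue.
 */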
static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct rt_prio_array *array = &rq->rt.active;
	struct task_struct *next;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	if (idx >= MAX_RT_PRIO)
		return NULL;

	queue = array->queue + idx;
	next = list_entry(queue->next, struct task_struct, run_list);

	next->se.exec_start = rq->clock;

	return next;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);
	p->se.exec_start = 0;
}

#ifdef CONFIG_SMP
/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

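/*
 * A task is a candidate for migration if it is not currently running,
 * is allowed on the target cpu (cpu == -1 means "any cpu") and may run
 * on more than one CPU.
 */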
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
	    (p->nr_cpus_allowed > 1))
		return 1;
	return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
	struct rt_prio_array *array = &rq->rt.active;
	struct task_struct *next;
	struct list_head *queue;
	int idx;

	assert_spin_locked(&rq->lock);

	if (likely(rq->rt.rt_nr_running < 2))
		return NULL;

	idx = sched_find_first_bit(array->bitmap);
	if (unlikely(idx >= MAX_RT_PRIO)) {
		WARN_ON(1); /* rt_nr_running is bad */
		return NULL;
	}

	queue = array->queue + idx;
	BUG_ON(list_empty(queue));

	next = list_entry(queue->next, struct task_struct, run_list);
	if (unlikely(pick_rt_task(rq, next, cpu)))
		goto out;

	if (queue->next->next != queue) {
		/* same prio task */
		next = list_entry(queue->next->next, struct task_struct,
				  run_list);
		if (pick_rt_task(rq, next, cpu))
			goto out;
	}

 retry:
	/* slower, but more flexible */
	idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
	if (unlikely(idx >= MAX_RT_PRIO))
		return NULL;

	queue = array->queue + idx;
	BUG_ON(list_empty(queue));

	list_for_each_entry(next, queue, run_list) {
		if (pick_rt_task(rq, next, cpu))
			goto out;
	}

	goto retry;

 out:
	return next;
}

static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);

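/*
 * Build a cpumask of the online CPUs (among those the task may run on)
 * whose runqueues run at the lowest RT priority, preferring CPUs with no
 * RT load at all.  The returned count tells the caller whether a single
 * candidate can be used directly or a topology-aware search is needed.
 */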
static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
{
	int       lowest_prio = -1;
	int       lowest_cpu  = -1;
	int       count       = 0;
	int       cpu;

	cpus_and(*lowest_mask, cpu_online_map, task->cpus_allowed);

	/*
	 * Scan each rq for the lowest prio.
	 */
	for_each_cpu_mask(cpu, *lowest_mask) {
		struct rq *rq = cpu_rq(cpu);

		/* We look for lowest RT prio or non-rt CPU */
		if (rq->rt.highest_prio >= MAX_RT_PRIO) {
			/*
			 * if we already found a low RT queue
			 * and now we found this non-rt queue
			 * clear the mask and set our bit.
			 * Otherwise just return the queue as is
			 * and the count==1 will cause the algorithm
			 * to use the first bit found.
			 */
			if (lowest_cpu != -1) {
				cpus_clear(*lowest_mask);
				cpu_set(rq->cpu, *lowest_mask);
			}
			return 1;
		}

		/* no locking for now */
		if ((rq->rt.highest_prio > task->prio)
		    && (rq->rt.highest_prio >= lowest_prio)) {
			if (rq->rt.highest_prio > lowest_prio) {
				/* new low - clear old data */
				lowest_prio = rq->rt.highest_prio;
				lowest_cpu = cpu;
				count = 0;
			}
			count++;
		} else
			cpu_clear(cpu, *lowest_mask);
	}

	/*
	 * Clear out all the set bits that represent
	 * runqueues that were of higher prio than
	 * the lowest_prio.
	 */
	if (lowest_cpu > 0) {
		/*
		 * Perhaps we could add another cpumask op to
		 * zero out bits. Like cpu_zero_bits(cpumask, nrbits);
		 * Then that could be optimized to use memset and such.
		 */
		for_each_cpu_mask(cpu, *lowest_mask) {
			if (cpu >= lowest_cpu)
				break;
			cpu_clear(cpu, *lowest_mask);
		}
	}

	return count;
}

static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
{
	int first;

	/* "this_cpu" is cheaper to preempt than a remote processor */
	if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
		return this_cpu;

	first = first_cpu(*mask);
	if (first != NR_CPUS)
		return first;

	return -1;
}

static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu      = task_cpu(task);
	int count    = find_lowest_cpus(task, lowest_mask);

	if (!count)
		return -1; /* No targets found */

	/*
	 * There is no sense in performing an optimal search if only one
	 * target is found.
	 */
	if (count == 1)
		return first_cpu(*lowest_mask);

	/*
	 * At this point we have built a mask of cpus representing the
	 * lowest priority tasks in the system.  Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last cpu that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpu_isset(cpu, *lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which cpu is logically closest to our hot cache data.
	 */
	if (this_cpu == cpu)
		this_cpu = -1; /* Skip this_cpu opt if the same */

	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			cpumask_t domain_mask;
			int       best_cpu;

			cpus_and(domain_mask, sd->span, *lowest_mask);

			best_cpu = pick_optimal_cpu(this_cpu,
						    &domain_mask);
			if (best_cpu != -1)
				return best_cpu;
		}
	}

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible
	 * locations.
	 */
	return pick_optimal_cpu(this_cpu, lowest_mask);
}

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the mean time, task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpu_isset(lowest_rq->cpu,
						task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !task->se.on_rq)) {

				spin_unlock(&lowest_rq->lock);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio > task->prio)
			break;

		/* try again */
		spin_unlock(&lowest_rq->lock);
		lowest_rq = NULL;
	}

	return lowest_rq;
}

/*
 * If the current CPU has more than one RT task, see if the non
 * running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;
	int ret = 0;
	int paranoid = RT_MAX_TRIES;

	assert_spin_locked(&rq->lock);

	if (!rq->rt.overloaded)
		return 0;

	next_task = pick_next_highest_task_rt(rq, -1);
	if (!next_task)
		return 0;

 retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * It's possible that the next_task slipped in with a
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
	if (unlikely(next_task->prio < rq->curr->prio)) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has changed.
		 * If it has, then try again.
		 */
		task = pick_next_highest_task_rt(rq, -1);
		if (unlikely(task != next_task) && task && paranoid--) {
			put_task_struct(next_task);
			next_task = task;
			goto retry;
		}
		goto out;
	}

	assert_spin_locked(&lowest_rq->lock);

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);

	resched_task(lowest_rq->curr);

	spin_unlock(&lowest_rq->lock);

	ret = 1;
out:
	put_task_struct(next_task);

	return ret;
}

/*
 * TODO: Currently we just use the second highest prio task on
 *       the queue, and stop when it can't migrate (or there's
 *       no more RT tasks).  There may be a case where a lower
 *       priority RT task has a different affinity than the
 *       higher RT task. In this case the lower RT task could
 *       possibly be able to migrate whereas the higher priority
 *       RT task could not.  We currently ignore this issue.
 *       Enhancements are welcome!
 */
static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task will return true if it moved an RT */
	while (push_rt_task(rq))
		;
}

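/*
 * Pull side of the RT balancer: scan the overloaded runqueues and steal
 * any queued RT task that would preempt what this runqueue is about to
 * run.  Returns 1 if the caller should re-pick its next task.
 */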
static int pull_rt_task(struct rq *this_rq)
{
	struct task_struct *next;
	struct task_struct *p;
	struct rq *src_rq;
	cpumask_t *rto_cpumask;
	int this_cpu = this_rq->cpu;
	int cpu;
	int ret = 0;

	assert_spin_locked(&this_rq->lock);

	/*
	 * If cpusets are used, and we have overlapping
	 * run queue cpusets, then this algorithm may not catch all.
	 * This is just the price you pay on trying to keep
	 * dirtying caches down on large SMP machines.
	 */
	if (likely(!rt_overloaded()))
		return 0;

	next = pick_next_task_rt(this_rq);

	rto_cpumask = rt_overload();

	for_each_cpu_mask(cpu, *rto_cpumask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);
		if (unlikely(src_rq->rt.rt_nr_running <= 1)) {
			/*
			 * It is possible that overlapping cpusets
			 * will miss clearing a non overloaded runqueue.
			 * Clear it now.
			 */
			if (double_lock_balance(this_rq, src_rq)) {
				/* unlocked our runqueue lock */
				struct task_struct *old_next = next;
				next = pick_next_task_rt(this_rq);
				if (next != old_next)
					ret = 1;
			}
			if (likely(src_rq->rt.rt_nr_running <= 1))
				/*
				 * Small chance that this_rq->curr changed
				 * but it's really harmless here.
				 */
				rt_clear_overload(this_rq);
			else
				/*
				 * Heh, the src_rq is now overloaded, since
				 * we already have the src_rq lock, go straight
				 * to pulling tasks from it.
				 */
				goto try_pulling;
			spin_unlock(&src_rq->lock);
			continue;
		}

		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * steal our next task - hence we must cause
		 * the caller to recalculate the next task
		 * in that case:
		 */
		if (double_lock_balance(this_rq, src_rq)) {
			struct task_struct *old_next = next;
			next = pick_next_task_rt(this_rq);
			if (next != old_next)
				ret = 1;
		}

		/*
		 * Are there still pullable RT tasks?
		 */
		if (src_rq->rt.rt_nr_running <= 1) {
			spin_unlock(&src_rq->lock);
			continue;
		}

 try_pulling:
		p = pick_next_highest_task_rt(src_rq, this_cpu);

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
		if (p && (!next || (p->prio < next->prio))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->se.on_rq);

			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just that p is waking up and hasn't
			 * had a chance to schedule. We only pull
			 * p if it is lower in priority than the
			 * current task on the run queue or
			 * this_rq next task is lower in prio than
			 * the current task on that rq.
			 */
			if (p->prio < src_rq->curr->prio ||
			    (next && next->prio < src_rq->curr->prio))
				goto bail;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood
			 * but possible)
			 */

			/*
			 * Update next so that we won't pick a task
			 * on another cpu with a priority lower (or equal)
			 * than the one we just picked.
			 */
			next = p;

		}
 bail:
		spin_unlock(&src_rq->lock);
	}

	return ret;
}

static void schedule_balance_rt(struct rq *rq,
				struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	if (unlikely(rt_task(prev)) &&
	    rq->rt.highest_prio > prev->prio)
		pull_rt_task(rq);
}

static void schedule_tail_balance_rt(struct rq *rq)
{
	/*
	 * If we have more than one rt_task queued, then
	 * see if we can push the other rt_tasks off to other CPUS.
	 * Note we may release the rq lock, and since
	 * the lock was owned by prev, we need to release it
	 * first via finish_lock_switch and then reacquire it here.
	 */
	if (unlikely(rq->rt.overloaded)) {
		spin_lock_irq(&rq->lock);
		push_rt_tasks(rq);
		spin_unlock_irq(&rq->lock);
	}
}

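/*
 * Called after a wakeup: if the woken task is an RT task that is not
 * running and does not beat the highest RT priority already queued here,
 * and this runqueue is overloaded, try to push RT tasks away immediately.
 */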
static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
{
	if (unlikely(rt_task(p)) &&
	    !task_running(rq, p) &&
	    (p->prio >= rq->rt.highest_prio) &&
	    rq->rt.overloaded)
		push_rt_tasks(rq);
}

static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move,
		struct sched_domain *sd, enum cpu_idle_type idle,
		int *all_pinned, int *this_best_prio)
{
	/* don't touch RT tasks */
	return 0;
}

static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		 struct sched_domain *sd, enum cpu_idle_type idle)
{
	/* don't touch RT tasks */
	return 0;
}

static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
{
	int weight = cpus_weight(*new_mask);

	BUG_ON(!rt_task(p));

	/*
	 * Update the migration status of the RQ if we have an RT task
	 * which is running AND changing its weight value.
	 */
	if (p->se.on_rq && (weight != p->nr_cpus_allowed)) {
		struct rq *rq = task_rq(p);

		if ((p->nr_cpus_allowed <= 1) && (weight > 1)) {
			rq->rt.rt_nr_migratory++;
		} else if ((p->nr_cpus_allowed > 1) && (weight <= 1)) {
			BUG_ON(!rq->rt.rt_nr_migratory);
			rq->rt.rt_nr_migratory--;
		}

		update_rt_migration(rq);
	}

	p->cpus_allowed    = *new_mask;
	p->nr_cpus_allowed = weight;
}

#else /* CONFIG_SMP */
# define schedule_tail_balance_rt(rq)	do { } while (0)
# define schedule_balance_rt(rq, prev)	do { } while (0)
# define wakeup_balance_rt(rq, p)	do { } while (0)
#endif /* CONFIG_SMP */

static void task_tick_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->time_slice)
		return;

	p->time_slice = DEF_TIMESLICE;

	/*
	 * Requeue to the end of queue if we are not the only element
	 * on the queue:
	 */
	if (p->run_list.prev != p->run_list.next) {
		requeue_task_rt(rq, p);
		set_tsk_need_resched(p);
	}
}

static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq->clock;
}

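/*
 * The scheduling-class method table the core scheduler uses to drive
 * SCHED_FIFO and SCHED_RR tasks.
 */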
const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,
#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_rt,
#endif /* CONFIG_SMP */

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

#ifdef CONFIG_SMP
	.load_balance		= load_balance_rt,
	.move_one_task		= move_one_task_rt,
	.set_cpus_allowed       = set_cpus_allowed_rt,
#endif

	.set_curr_task          = set_curr_task_rt,
	.task_tick		= task_tick_rt,
};