/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Interactivity improvements by Mike Galbraith
 *  (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 *  Various enhancements by Dmitry Adamushko.
 *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 *  Group scheduling enhancements by Srivatsa Vaddagiri
 *  Copyright IBM Corporation, 2007
 *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 *  Scaled math optimizations by Thomas Gleixner
 *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/latencytop.h>

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 20ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 20000000ULL;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 4 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 4000000ULL;

/*
 * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 5;

/*
 * After fork, child runs first (default). If set to 0 then the
 * parent will (try to) run first.
 */
const_debug unsigned int sysctl_sched_child_runs_first = 1;

/*
 * sys_sched_yield() compat mode
 *
 * This option switches the aggressive yield implementation of the
 * old scheduler back on.
 */
unsigned int __read_mostly sysctl_sched_compat_yield;

/*
 * SCHED_BATCH wake-up granularity.
 * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 10000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

#else	/* CONFIG_FAIR_GROUP_SCHED */

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#endif	/* CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}


/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */
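
/*
 * vruntime comparisons are done on a signed delta, so max_vruntime() and
 * min_vruntime() stay correct even if the unsigned 64-bit vruntime values
 * wrap around.
 */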

static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta > 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}
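
/*
 * The rbtree is keyed on each entity's vruntime relative to the runqueue's
 * min_vruntime; keeping the key relative keeps it small and safely
 * comparable as an s64.
 */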

static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return se->vruntime - cfs_rq->min_vruntime;
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	s64 key = entity_key(cfs_rq, se);
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (key < entity_key(cfs_rq, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost) {
		cfs_rq->rb_leftmost = &se->run_node;
		/*
		 * maintain cfs_rq->min_vruntime to be a monotonically increasing
		 * value tracking the leftmost vruntime in the tree.
		 */
		cfs_rq->min_vruntime =
			max_vruntime(cfs_rq->min_vruntime, se->vruntime);
	}

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node) {
		struct rb_node *next_node;
		struct sched_entity *next;

		next_node = rb_next(&se->run_node);
		cfs_rq->rb_leftmost = next_node;

		if (next_node) {
			next = rb_entry(next_node,
					struct sched_entity, run_node);
			cfs_rq->min_vruntime =
				max_vruntime(cfs_rq->min_vruntime,
					     next->vruntime);
		}
	}

	if (cfs_rq->next == se)
		cfs_rq->next = NULL;

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rb_leftmost;
}

static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
	return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
}

static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

	if (!last)
		return NULL;

	return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

#ifdef CONFIG_SCHED_DEBUG
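/*
 * Invoked when the latency/granularity sysctls are written; re-derives
 * sched_nr_latency as DIV_ROUND_UP(sysctl_sched_latency,
 * sysctl_sched_min_granularity).
 */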
int sched_nr_latency_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

	return 0;
}
#endif

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (nr_running > sched_nr_latency) we have to
 * stretch this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
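/*
 * E.g. with the (pre-scaling) defaults above - 20ms latency, 4ms minimal
 * granularity, hence nr_latency = 5 - 3 runnable tasks keep the 20ms period,
 * while 10 runnable tasks stretch it to 10 * 4ms = 40ms.
 */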
static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency = sched_nr_latency;

	if (unlikely(nr_running > nr_latency)) {
		period = sysctl_sched_min_granularity;
		period *= nr_running;
	}

	return period;
}

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*w/rw
 */
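/*
 * E.g. two runnable nice-0 tasks (weight 1024 each) sharing a 20ms period
 * each get 20ms * 1024 / 2048 = 10ms of wall-clock time per period.
 */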
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running);

	slice *= se->load.weight;
	do_div(slice, cfs_rq->load.weight);

	return slice;
}

/*
 * We calculate the vruntime slice.
 *
 * vs = s/w = p/rw
 */
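/*
 * Since each entity's wall-time slice is proportional to its weight while
 * its vruntime advances inversely to it, every runnable entity's vruntime
 * moves by the same vslice (p * NICE_0_LOAD / rw) over one full period.
 */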
static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
{
	u64 vslice = __sched_period(nr_running);

	vslice *= NICE_0_LOAD;
	do_div(vslice, rq_weight);

	return vslice;
}

static u64 sched_vslice(struct cfs_rq *cfs_rq)
{
	return __sched_vslice(cfs_rq->load.weight, cfs_rq->nr_running);
}
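
/*
 * Like sched_vslice(), but computed as if @se were already on the queue;
 * used by place_entity() to debit a newly placed entity.
 */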

static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return __sched_vslice(cfs_rq->load.weight + se->load.weight,
			cfs_rq->nr_running + 1);
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta_exec_weighted;

	schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);
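	/*
	 * Weight the executed time: a nice-0 entity advances its vruntime at
	 * wall-clock rate, heavier entities advance more slowly and lighter
	 * ones faster.
	 */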
	delta_exec_weighted = delta_exec;
	if (unlikely(curr->load.weight != NICE_0_LOAD)) {
		delta_exec_weighted = calc_delta_fair(delta_exec_weighted,
							&curr->load);
	}
	curr->vruntime += delta_exec_weighted;
}

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_of(cfs_rq)->clock;
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		cpuacct_charge(curtask, delta_exec);
	}
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_max, max(se->wait_max,
			rq_of(cfs_rq)->clock - se->wait_start));
	schedstat_set(se->wait_count, se->wait_count + 1);
	schedstat_set(se->wait_sum, se->wait_sum +
			rq_of(cfs_rq)->clock - se->wait_start);
	schedstat_set(se->wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_of(cfs_rq)->clock;
}

/**************************************************
 * Scheduling class queueing methods:
 */

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_add(&cfs_rq->load, se->load.weight);
	cfs_rq->nr_running++;
	se->on_rq = 1;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_sub(&cfs_rq->load, se->load.weight);
	cfs_rq->nr_running--;
	se->on_rq = 0;
}

static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
	if (se->sleep_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
		struct task_struct *tsk = task_of(se);

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->sleep_max))
			se->sleep_max = delta;

		se->sleep_start = 0;
		se->sum_sleep_runtime += delta;

		account_scheduler_latency(tsk, delta >> 10, 1);
	}
	if (se->block_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->block_start;
		struct task_struct *tsk = task_of(se);

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->block_max))
			se->block_max = delta;

		se->block_start = 0;
		se->sum_sleep_runtime += delta;

		/*
		 * Blocking time is in units of nanosecs, so shift by 20 to
		 * get a milliseconds-range estimation of the amount of
		 * time that the task spent sleeping:
		 */
		if (unlikely(prof_on == SLEEP_PROFILING)) {

			profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
				     delta >> 20);
		}
		account_scheduler_latency(tsk, delta >> 10, 0);
	}
#endif
}

static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	s64 d = se->vruntime - cfs_rq->min_vruntime;

	if (d < 0)
		d = -d;

	if (d > 3*sysctl_sched_latency)
		schedstat_inc(cfs_rq, nr_spread_over);
#endif
}

static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
	u64 vruntime;
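
	/*
	 * Base the placement on the smaller of the runqueue's monotonic
	 * min_vruntime and the leftmost queued entity's vruntime.
	 */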

	if (first_fair(cfs_rq)) {
		vruntime = min_vruntime(cfs_rq->min_vruntime,
				__pick_next_entity(cfs_rq)->vruntime);
	} else
		vruntime = cfs_rq->min_vruntime;

	if (sched_feat(TREE_AVG)) {
		struct sched_entity *last = __pick_last_entity(cfs_rq);
		if (last) {
			vruntime += last->vruntime;
			vruntime >>= 1;
		}
	} else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
		vruntime += sched_vslice(cfs_rq)/2;

	/*
	 * The 'current' period is already promised to the current tasks,
	 * however the extra weight of the new task will slow them down a
	 * little, place the new task so that it fits in the slot that
	 * stays open at the end.
	 */
	if (initial && sched_feat(START_DEBIT))
		vruntime += sched_vslice_add(cfs_rq, se);

	if (!initial) {
		/* sleeps up to a single latency don't count. */
		if (sched_feat(NEW_FAIR_SLEEPERS))
			vruntime -= sysctl_sched_latency;

		/* ensure we never gain time by being placed backwards. */
		vruntime = max_vruntime(se->vruntime, vruntime);
	}

	se->vruntime = vruntime;
}

static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

	if (wakeup) {
		place_entity(cfs_rq, se, 0);
		enqueue_sleeper(cfs_rq, se);
	}

	update_stats_enqueue(cfs_rq, se);
	check_spread(cfs_rq, se);
	if (se != cfs_rq->curr)
		__enqueue_entity(cfs_rq, se);
	account_entity_enqueue(cfs_rq, se);
}

static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

	update_stats_dequeue(cfs_rq, se);
	if (sleep) {
#ifdef CONFIG_SCHEDSTATS
		if (entity_is_task(se)) {
			struct task_struct *tsk = task_of(se);

			if (tsk->state & TASK_INTERRUPTIBLE)
				se->sleep_start = rq_of(cfs_rq)->clock;
			if (tsk->state & TASK_UNINTERRUPTIBLE)
				se->block_start = rq_of(cfs_rq)->clock;
		}
#endif
	}

	if (se != cfs_rq->curr)
		__dequeue_entity(cfs_rq, se);
	account_entity_dequeue(cfs_rq, se);
}

/*
 * Preempt the current task if it has run beyond its fair wall-time slice:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	unsigned long ideal_runtime, delta_exec;

	ideal_runtime = sched_slice(cfs_rq, curr);
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	if (delta_exec > ideal_runtime)
		resched_task(rq_of(cfs_rq)->curr);
}

static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/* 'current' is not kept within the tree. */
	if (se->on_rq) {
		/*
		 * Any task has to be enqueued before it gets to execute on
		 * a CPU. So account for the time it spent waiting on the
		 * runqueue.
		 */
		update_stats_wait_end(cfs_rq, se);
		__dequeue_entity(cfs_rq, se);
	}

	update_stats_curr_start(cfs_rq, se);
	cfs_rq->curr = se;
#ifdef CONFIG_SCHEDSTATS
	/*
	 * Track our maximum slice length, if the CPU's load is at
	 * least twice that of our own weight (i.e. don't track it
	 * when there are only lesser-weight tasks around):
	 */
	if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
		se->slice_max = max(se->slice_max,
			se->sum_exec_runtime - se->prev_sum_exec_runtime);
	}
#endif
	se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
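
/*
 * Prefer the entity that check_preempt_wakeup() marked as cfs_rq->next (the
 * most recently woken entity), but only if its vruntime is within one
 * (load-scaled) wakeup granularity of the leftmost entity's; otherwise pick
 * the leftmost.
 */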

static struct sched_entity *
pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	s64 diff, gran;

	if (!cfs_rq->next)
		return se;

	diff = cfs_rq->next->vruntime - se->vruntime;
	if (diff < 0)
		return se;

	gran = calc_delta_fair(sysctl_sched_wakeup_granularity, &cfs_rq->load);
	if (diff > gran)
		return se;

	return cfs_rq->next;
}

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct sched_entity *se = NULL;

	if (first_fair(cfs_rq)) {
		se = __pick_next_entity(cfs_rq);
		se = pick_next(cfs_rq, se);
		set_next_entity(cfs_rq, se);
	}

	return se;
}

static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
	/*
	 * If still on the runqueue then deactivate_task()
	 * was not called and update_curr() has to be done:
	 */
	if (prev->on_rq)
		update_curr(cfs_rq);

	check_spread(cfs_rq, prev);
	if (prev->on_rq) {
		update_stats_wait_start(cfs_rq, prev);
		/* Put 'current' back into the tree. */
		__enqueue_entity(cfs_rq, prev);
	}
	cfs_rq->curr = NULL;
}

static void
entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

#ifdef CONFIG_SCHED_HRTICK
	/*
	 * queued ticks are scheduled to match the slice, so don't bother
	 * validating it and just reschedule.
	 */
	if (queued)
		return resched_task(rq_of(cfs_rq)->curr);
	/*
	 * don't let the period tick interfere with the hrtick preemption
	 */
	if (!sched_feat(DOUBLE_TICK) &&
			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
		return;
#endif

	if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
		check_preempt_tick(cfs_rq, curr);
}

/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 * another cpu ('this_cpu')
 */
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return cfs_rq->tg->cfs_rq[this_cpu];
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return 1;

	return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

#else	/* CONFIG_FAIR_GROUP_SCHED */

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return &cpu_rq(this_cpu)->cfs;
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_SCHED_HRTICK
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
	int requeue = rq->curr == p;
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	WARN_ON(task_rq(p) != rq);

	if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
		u64 slice = sched_slice(cfs_rq, se);
		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
		s64 delta = slice - ran;

		if (delta < 0) {
			if (rq->curr == p)
				resched_task(p);
			return;
		}

		/*
		 * Don't schedule slices shorter than 10000ns, that just
		 * doesn't make sense. Rely on vruntime for fairness.
		 */
		if (!requeue)
			delta = max(10000LL, delta);

		hrtick_start(rq, delta, requeue);
	}
}
#else
static inline void
hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
}
#endif

/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
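		/* stop once we reach an entity that is already enqueued */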
		if (se->on_rq)
			break;
		cfs_rq = cfs_rq_of(se);
		enqueue_entity(cfs_rq, se, wakeup);
		wakeup = 1;
	}

	hrtick_start_fair(rq, rq->curr);
}

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		dequeue_entity(cfs_rq, se, sleep);
		/* Don't dequeue parent if it has other entities besides us */
		if (cfs_rq->load.weight)
			break;
		sleep = 1;
	}

	hrtick_start_fair(rq, rq->curr);
}

/*
 * sched_yield() support is very simple: by default we just update the
 * current task's runtime statistics and let fairness take its course.
 *
 * If compat_yield is turned on (or the task is SCHED_BATCH) then we requeue
 * 'current' at the rightmost position in the tree.
 */
static void yield_task_fair(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	struct sched_entity *rightmost, *se = &curr->se;

	/*
	 * Are we the only task in the tree?
	 */
	if (unlikely(cfs_rq->nr_running == 1))
		return;

	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
		__update_rq_clock(rq);
		/*
		 * Update run-time statistics of the 'current'.
		 */
		update_curr(cfs_rq);

		return;
	}
	/*
	 * Find the rightmost entry in the rbtree:
	 */
	rightmost = __pick_last_entity(cfs_rq);
	/*
	 * Already in the rightmost position?
	 */
	if (unlikely(rightmost->vruntime < se->vruntime))
		return;

	/*
	 * Minimally necessary key value to be last in the tree:
	 * Upon rescheduling, sched_class::put_prev_task() will place
	 * 'current' within the tree based on its new key value.
	 */
	se->vruntime = rightmost->vruntime + 1;
}

/*
 * wake_idle() will wake a task on an idle cpu if task->cpu is
 * not idle and an idle cpu is available.  The span of cpus to
 * search starts with cpus closest then further out as needed,
 * so we always favor a closer, idle cpu.
 *
 * Returns the CPU we should wake onto.
 */
#if defined(ARCH_HAS_SCHED_WAKE_IDLE)
static int wake_idle(int cpu, struct task_struct *p)
{
	cpumask_t tmp;
	struct sched_domain *sd;
	int i;

	/*
	 * If it is idle, then it is the best cpu to run this task.
	 *
	 * This cpu is also the best, if it has more than one task already.
	 * Siblings must also be busy (in most cases) as they didn't already
	 * pick up the extra load from this cpu and hence we need not check
	 * sibling runqueue info. This will avoid the checks and cache miss
	 * penalties associated with that.
	 */
	if (idle_cpu(cpu) || cpu_rq(cpu)->nr_running > 1)
		return cpu;

	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_IDLE) {
			cpus_and(tmp, sd->span, p->cpus_allowed);
			for_each_cpu_mask(i, tmp) {
				if (idle_cpu(i)) {
					if (i != task_cpu(p)) {
						schedstat_inc(p,
						       se.nr_wakeups_idle);
					}
					return i;
				}
			}
		} else {
			break;
		}
	}
	return cpu;
}
#else
static inline int wake_idle(int cpu, struct task_struct *p)
{
	return cpu;
}
#endif

#ifdef CONFIG_SMP
static int select_task_rq_fair(struct task_struct *p, int sync)
{
	int cpu, this_cpu;
	struct rq *rq;
	struct sched_domain *sd, *this_sd = NULL;
	int new_cpu;

	cpu      = task_cpu(p);
	rq       = task_rq(p);
	this_cpu = smp_processor_id();
	new_cpu  = cpu;

	if (cpu == this_cpu)
		goto out_set_cpu;

	for_each_domain(this_cpu, sd) {
		if (cpu_isset(cpu, sd->span)) {
			this_sd = sd;
			break;
		}
	}

	if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
		goto out_set_cpu;

	/*
	 * Check for affine wakeup and passive balancing possibilities.
	 */
	if (this_sd) {
		int idx = this_sd->wake_idx;
		unsigned int imbalance;
		unsigned long load, this_load;

		imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;

		load = source_load(cpu, idx);
		this_load = target_load(this_cpu, idx);

		new_cpu = this_cpu; /* Wake to this CPU if we can */

		if (this_sd->flags & SD_WAKE_AFFINE) {
			unsigned long tl = this_load;
			unsigned long tl_per_task;

			/*
			 * Attract cache-cold tasks on sync wakeups:
			 */
			if (sync && !task_hot(p, rq->clock, this_sd))
				goto out_set_cpu;

			schedstat_inc(p, se.nr_wakeups_affine_attempts);
			tl_per_task = cpu_avg_load_per_task(this_cpu);

			/*
			 * If sync wakeup then subtract the (maximum possible)
			 * effect of the currently running task from the load
			 * of the current CPU:
			 */
			if (sync)
				tl -= current->se.load.weight;

			if ((tl <= load &&
				tl + target_load(cpu, idx) <= tl_per_task) ||
			       100*(tl + p->se.load.weight) <= imbalance*load) {
				/*
				 * This domain has SD_WAKE_AFFINE and
				 * p is cache cold in this domain, and
				 * there is no bad imbalance.
				 */
				schedstat_inc(this_sd, ttwu_move_affine);
				schedstat_inc(p, se.nr_wakeups_affine);
				goto out_set_cpu;
			}
		}

		/*
		 * Start passive balancing when half the imbalance_pct
		 * limit is reached.
		 */
		if (this_sd->flags & SD_WAKE_BALANCE) {
			if (imbalance*this_load <= 100*load) {
				schedstat_inc(this_sd, ttwu_move_balance);
				schedstat_inc(p, se.nr_wakeups_passive);
				goto out_set_cpu;
			}
		}
	}

	new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */
out_set_cpu:
	return wake_idle(new_cpu, p);
}
#endif /* CONFIG_SMP */


/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
{
	struct task_struct *curr = rq->curr;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	struct sched_entity *se = &curr->se, *pse = &p->se;
	unsigned long gran;

	if (unlikely(rt_prio(p->prio))) {
		update_rq_clock(rq);
		update_curr(cfs_rq);
		resched_task(curr);
		return;
	}

	cfs_rq_of(pse)->next = pse;

	/*
	 * Batch tasks do not preempt (their preemption is driven by
	 * the tick):
	 */
	if (unlikely(p->policy == SCHED_BATCH))
		return;

	if (!sched_feat(WAKEUP_PREEMPT))
		return;

	while (!is_same_group(se, pse)) {
		se = parent_entity(se);
		pse = parent_entity(pse);
	}

	gran = sysctl_sched_wakeup_granularity;
	/*
	 * More easily preempt - nice tasks, while not making
	 * it harder for + nice tasks.
	 */
	if (unlikely(se->load.weight > NICE_0_LOAD))
		gran = calc_delta_fair(gran, &se->load);
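
	/*
	 * Preempt only if the waking entity's vruntime is more than one
	 * (possibly weight-scaled) wakeup granularity smaller than the
	 * current entity's vruntime.
	 */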

	if (pse->vruntime + gran < se->vruntime)
		resched_task(curr);
}

static struct task_struct *pick_next_task_fair(struct rq *rq)
{
	struct task_struct *p;
	struct cfs_rq *cfs_rq = &rq->cfs;
	struct sched_entity *se;

	if (unlikely(!cfs_rq->nr_running))
		return NULL;

	do {
		se = pick_next_entity(cfs_rq);
		cfs_rq = group_cfs_rq(se);
	} while (cfs_rq);

	p = task_of(se);
	hrtick_start_fair(rq, p);

	return p;
}

/*
 * Account for a descheduled task:
 */
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
	struct sched_entity *se = &prev->se;
	struct cfs_rq *cfs_rq;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		put_prev_entity(cfs_rq, se);
	}
}

#ifdef CONFIG_SMP
/**************************************************
 * Fair scheduling class load-balancing methods:
 */

/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static struct task_struct *
__load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
{
	struct task_struct *p;

	if (!curr)
		return NULL;

	p = rb_entry(curr, struct task_struct, se.run_node);
	cfs_rq->rb_load_balance_curr = rb_next(curr);

	return p;
}

static struct task_struct *load_balance_start_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, first_fair(cfs_rq));
}

static struct task_struct *load_balance_next_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
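/*
 * Priority of the task currently running on (or, if none, next to run from)
 * this cfs_rq; used to seed *this_best_prio in load_balance_fair().
 */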
static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr;
	struct task_struct *p;

	if (!cfs_rq->nr_running || !first_fair(cfs_rq))
		return MAX_PRIO;

	curr = cfs_rq->curr;
	if (!curr)
		curr = __pick_next_entity(cfs_rq);

	p = task_of(curr);

	return p->prio;
}
#endif

static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	struct cfs_rq *busy_cfs_rq;
	long rem_load_move = max_load_move;
	struct rq_iterator cfs_rq_iterator;

	cfs_rq_iterator.start = load_balance_start_fair;
	cfs_rq_iterator.next = load_balance_next_fair;

	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
#ifdef CONFIG_FAIR_GROUP_SCHED
		struct cfs_rq *this_cfs_rq;
		long imbalance;
		unsigned long maxload;

		this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);
1239

		imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
		/* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
		if (imbalance <= 0)
			continue;

		/* Don't pull more than imbalance/2 */
		imbalance /= 2;
		maxload = min(rem_load_move, imbalance);

		*this_best_prio = cfs_rq_best_prio(this_cfs_rq);
#else
# define maxload rem_load_move
#endif
		/*
		 * pass busy_cfs_rq argument into
		 * load_balance_[start|next]_fair iterators
		 */
		cfs_rq_iterator.arg = busy_cfs_rq;
		rem_load_move -= balance_tasks(this_rq, this_cpu, busiest,
					       maxload, sd, idle, all_pinned,
					       this_best_prio,
					       &cfs_rq_iterator);

		if (rem_load_move <= 0)
			break;
	}

	return max_load_move - rem_load_move;
}

static int
move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		   struct sched_domain *sd, enum cpu_idle_type idle)
{
	struct cfs_rq *busy_cfs_rq;
	struct rq_iterator cfs_rq_iterator;

	cfs_rq_iterator.start = load_balance_start_fair;
	cfs_rq_iterator.next = load_balance_next_fair;

	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
		/*
		 * pass busy_cfs_rq argument into
		 * load_balance_[start|next]_fair iterators
		 */
		cfs_rq_iterator.arg = busy_cfs_rq;
		if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
				       &cfs_rq_iterator))
			return 1;
	}

	return 0;
}
#endif

/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		entity_tick(cfs_rq, se, queued);
	}
}

#define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)

/*
 * Share the fairness runtime between parent and child, thus the
 * total amount of pressure for CPU stays equal - new tasks
 * get a chance to run but frequent forkers are not allowed to
 * monopolize the CPU. Note: the parent runqueue is locked,
 * the child is not running yet.
 */
static void task_new_fair(struct rq *rq, struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);
	struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
	int this_cpu = smp_processor_id();

	sched_info_queued(p);

	update_curr(cfs_rq);
	place_entity(cfs_rq, se, 1);

	/* 'curr' will be NULL if the child belongs to a different group */
	if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
			curr && curr->vruntime < se->vruntime) {
		/*
		 * Upon rescheduling, sched_class::put_prev_task() will place
		 * 'current' within the tree based on its new key value.
		 */
		swap(curr->vruntime, se->vruntime);
	}

	enqueue_task_fair(rq, p, 0);
	resched_task(rq->curr);
}

/*
 * Priority of the task has changed. Check to see if we preempt
 * the current task.
 */
static void prio_changed_fair(struct rq *rq, struct task_struct *p,
			      int oldprio, int running)
{
	/*
	 * Reschedule if we are currently running on this runqueue and
	 * our priority decreased, or if we are not currently running on
	 * this runqueue and our priority is higher than the current's
	 */
	if (running) {
		if (p->prio > oldprio)
			resched_task(rq->curr);
	} else
		check_preempt_curr(rq, p);
}

/*
 * We switched to the sched_fair class.
 */
static void switched_to_fair(struct rq *rq, struct task_struct *p,
			     int running)
{
	/*
	 * We were most likely switched from sched_rt, so
	 * kick off the schedule if running, otherwise just see
	 * if we can still preempt the current task.
	 */
	if (running)
		resched_task(rq->curr);
	else
		check_preempt_curr(rq, p);
}

/* Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
	struct sched_entity *se = &rq->curr->se;

	for_each_sched_entity(se)
		set_next_entity(cfs_rq_of(se), se);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static void moved_group_fair(struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);

	update_curr(cfs_rq);
	place_entity(cfs_rq, &p->se, 1);
}
#endif

/*
 * All the scheduling class methods:
 */
static const struct sched_class fair_sched_class = {
	.next			= &idle_sched_class,
	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,
#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_fair,
#endif /* CONFIG_SMP */

	.check_preempt_curr	= check_preempt_wakeup,

	.pick_next_task		= pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,

#ifdef CONFIG_SMP
	.load_balance		= load_balance_fair,
	.move_one_task		= move_one_task_fair,
#endif

	.set_curr_task          = set_curr_task_fair,
	.task_tick		= task_tick_fair,
	.task_new		= task_new_fair,

	.prio_changed		= prio_changed_fair,
	.switched_to		= switched_to_fair,

#ifdef CONFIG_FAIR_GROUP_SCHED
	.moved_group		= moved_group_fair,
#endif
};

#ifdef CONFIG_SCHED_DEBUG
static void print_cfs_stats(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_rq(m, cpu, &cpu_rq(cpu)->cfs);
#endif
	rcu_read_lock();
	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
		print_cfs_rq(m, cpu, cfs_rq);
	rcu_read_unlock();
}
#endif