fair.c 147.3 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17
/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Interactivity improvements by Mike Galbraith
 *  (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 *  Various enhancements by Dmitry Adamushko.
 *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 *  Group scheduling enhancements by Srivatsa Vaddagiri
 *  Copyright IBM Corporation, 2007
 *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 *  Scaled math optimizations by Thomas Gleixner
 *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
18 19 20
 *
 *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
21 22
 */

A
Arjan van de Ven 已提交
23
#include <linux/latencytop.h>
24
#include <linux/sched.h>
25
#include <linux/cpumask.h>
26 27 28 29 30 31 32
#include <linux/slab.h>
#include <linux/profile.h>
#include <linux/interrupt.h>

#include <trace/events/sched.h>

#include "sched.h"
A
Arjan van de Ven 已提交
33

34
/*
35
 * Targeted preemption latency for CPU-bound tasks:
36
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
37
 *
38
 * NOTE: this latency value is not the same as the concept of
I
Ingo Molnar 已提交
39 40 41
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
42
 *
I
Ingo Molnar 已提交
43 44
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
45
 */
46 47
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;
48

49 50 51 52 53 54 55 56 57 58 59 60
/*
 * The initial- and re-scaling of tunables is configurable
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
 *
 * Options are:
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
 * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
	= SCHED_TUNABLESCALING_LOG;

61
/*
62
 * Minimal preemption granularity for CPU-bound tasks:
63
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
64
 */
65 66
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
67 68

/*
69 70
 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
 */
71
static unsigned int sched_nr_latency = 8;
72 73

/*
74
 * After fork, child runs first. If set to 0 (default) then
75
 * parent will (try to) run first.
76
 */
77
unsigned int sysctl_sched_child_runs_first __read_mostly;
78 79 80

/*
 * SCHED_OTHER wake-up granularity.
81
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
82 83 84 85 86
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
87
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
88
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
89

90 91
const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

92 93 94 95 96 97 98
/*
 * The exponential sliding  window over which load is averaged for shares
 * distribution.
 * (default: 10msec)
 */
unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;

99 100 101 102 103 104 105 106 107 108 109 110 111 112
#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
 * each time a cfs_rq requests quota.
 *
 * Note: in the case that the slice exceeds the runtime remaining (either due
 * to consumption or the quota being specified to be smaller than the slice)
 * we will always only issue the remaining available time.
 *
 * default: 5 msec, units: microseconds
  */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif

113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216
/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 *
 * This idea comes from the SD scheduler of Con Kolivas:
 */
static int get_update_sysctl_factor(void)
{
	unsigned int cpus = min_t(int, num_online_cpus(), 8);
	unsigned int factor;

	switch (sysctl_sched_tunable_scaling) {
	case SCHED_TUNABLESCALING_NONE:
		factor = 1;
		break;
	case SCHED_TUNABLESCALING_LINEAR:
		factor = cpus;
		break;
	case SCHED_TUNABLESCALING_LOG:
	default:
		factor = 1 + ilog2(cpus);
		break;
	}

	return factor;
}

static void update_sysctl(void)
{
	unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
	(sysctl_##name = (factor) * normalized_sysctl_##name)
	SET_SYSCTL(sched_min_granularity);
	SET_SYSCTL(sched_latency);
	SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}

void sched_init_granularity(void)
{
	update_sysctl();
}

#if BITS_PER_LONG == 32
# define WMULT_CONST	(~0UL)
#else
# define WMULT_CONST	(1UL << 32)
#endif

#define WMULT_SHIFT	32

/*
 * Shift right and round:
 */
#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))

/*
 * delta *= weight / lw
 */
static unsigned long
calc_delta_mine(unsigned long delta_exec, unsigned long weight,
		struct load_weight *lw)
{
	u64 tmp;

	/*
	 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
	 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
	 * 2^SCHED_LOAD_RESOLUTION.
	 */
	if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
		tmp = (u64)delta_exec * scale_load_down(weight);
	else
		tmp = (u64)delta_exec;

	if (!lw->inv_weight) {
		unsigned long w = scale_load_down(lw->weight);

		if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
			lw->inv_weight = 1;
		else if (unlikely(!w))
			lw->inv_weight = WMULT_CONST;
		else
			lw->inv_weight = WMULT_CONST / w;
	}

	/*
	 * Check whether we'd overflow the 64-bit multiplication:
	 */
	if (unlikely(tmp > WMULT_CONST))
		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
			WMULT_SHIFT/2);
	else
		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);

	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
}


const struct sched_class fair_sched_class;
217

218 219 220 221
/**************************************************************
 * CFS operations on generic schedulable entities:
 */

222
#ifdef CONFIG_FAIR_GROUP_SCHED
223

224
/* cpu runqueue to which this cfs_rq is attached */
225 226
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
227
	return cfs_rq->rq;
228 229
}

230 231
/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)
232

233 234 235 236 237 238 239 240
static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!entity_is_task(se));
#endif
	return container_of(se, struct task_struct, se);
}

P
Peter Zijlstra 已提交
241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261
/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

262 263
static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
				       int force_update);
264

265 266 267
static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (!cfs_rq->on_list) {
268 269 270 271 272 273 274 275 276 277 278 279
		/*
		 * Ensure we either appear before our parent (if already
		 * enqueued) or force our parent to appear after us when it is
		 * enqueued.  The fact that we always enqueue bottom-up
		 * reduces this to two cases.
		 */
		if (cfs_rq->tg->parent &&
		    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
				&rq_of(cfs_rq)->leaf_cfs_rq_list);
		} else {
			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
280
				&rq_of(cfs_rq)->leaf_cfs_rq_list);
281
		}
282 283

		cfs_rq->on_list = 1;
284
		/* We should have no load, but we need to update last_decay. */
285
		update_cfs_rq_blocked_load(cfs_rq, 0);
286 287 288 289 290 291 292 293 294 295 296
	}
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->on_list) {
		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
		cfs_rq->on_list = 0;
	}
}

P
Peter Zijlstra 已提交
297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315
/* Iterate thr' all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group ? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return 1;

	return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358
/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se(struct sched_entity *se)
{
	int depth = 0;

	for_each_sched_entity(se)
		depth++;

	return depth;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth, pse_depth;

	/*
	 * preemption test can be made between sibling entities who are in the
	 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
	 * both tasks until we find their ancestors who are siblings of common
	 * parent.
	 */

	/* First walk up until both entities are at same depth */
	se_depth = depth_se(*se);
	pse_depth = depth_se(*pse);

	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}

359 360 361 362 363 364
#else	/* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}
365

366 367 368
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
369 370 371 372
}

#define entity_is_task(se)	1

P
Peter Zijlstra 已提交
373 374
#define for_each_sched_entity(se) \
		for (; se; se = NULL)
375

P
Peter Zijlstra 已提交
376
static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
377
{
P
Peter Zijlstra 已提交
378
	return &task_rq(p)->cfs;
379 380
}

P
Peter Zijlstra 已提交
381 382 383 384 385 386 387 388 389 390 391 392 393 394
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

395 396 397 398 399 400 401 402
static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

P
Peter Zijlstra 已提交
403 404 405 406 407 408 409 410 411 412 413 414 415 416
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

417 418 419 420 421
static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

P
Peter Zijlstra 已提交
422 423
#endif	/* CONFIG_FAIR_GROUP_SCHED */

424 425
static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);
426 427 428 429 430

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

431
static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
432
{
433 434
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta > 0)
435 436 437 438 439
		min_vruntime = vruntime;

	return min_vruntime;
}

440
static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
P
Peter Zijlstra 已提交
441 442 443 444 445 446 447 448
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

449 450 451 452 453 454
static inline int entity_before(struct sched_entity *a,
				struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}

455 456 457 458 459 460 461 462 463 464 465 466
static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	u64 vruntime = cfs_rq->min_vruntime;

	if (cfs_rq->curr)
		vruntime = cfs_rq->curr->vruntime;

	if (cfs_rq->rb_leftmost) {
		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
						   struct sched_entity,
						   run_node);

P
Peter Zijlstra 已提交
467
		if (!cfs_rq->curr)
468 469 470 471 472 473
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
474 475 476 477
#ifndef CONFIG_64BIT
	smp_wmb();
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
478 479
}

480 481 482
/*
 * Enqueue an entity into the rb-tree:
 */
483
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We dont care about collisions. Nodes with
		 * the same key stay together.
		 */
500
		if (entity_before(se, entry)) {
501 502 503 504 505 506 507 508 509 510 511
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
512
	if (leftmost)
I
Ingo Molnar 已提交
513
		cfs_rq->rb_leftmost = &se->run_node;
514 515 516 517 518

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

519
static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
520
{
P
Peter Zijlstra 已提交
521 522 523 524 525 526
	if (cfs_rq->rb_leftmost == &se->run_node) {
		struct rb_node *next_node;

		next_node = rb_next(&se->run_node);
		cfs_rq->rb_leftmost = next_node;
	}
I
Ingo Molnar 已提交
527

528 529 530
	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

531
struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
532
{
533 534 535 536 537 538
	struct rb_node *left = cfs_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_entity, run_node);
539 540
}

541 542 543 544 545 546 547 548 549 550 551
static struct sched_entity *__pick_next_entity(struct sched_entity *se)
{
	struct rb_node *next = rb_next(&se->run_node);

	if (!next)
		return NULL;

	return rb_entry(next, struct sched_entity, run_node);
}

#ifdef CONFIG_SCHED_DEBUG
552
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
553
{
554
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
555

556 557
	if (!last)
		return NULL;
558 559

	return rb_entry(last, struct sched_entity, run_node);
560 561
}

562 563 564 565
/**************************************************************
 * Scheduling class statistics methods:
 */

566
int sched_proc_update_handler(struct ctl_table *table, int write,
567
		void __user *buffer, size_t *lenp,
568 569
		loff_t *ppos)
{
570
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
571
	int factor = get_update_sysctl_factor();
572 573 574 575 576 577 578

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

579 580 581 582 583 584 585
#define WRT_SYSCTL(name) \
	(normalized_sysctl_##name = sysctl_##name / (factor))
	WRT_SYSCTL(sched_min_granularity);
	WRT_SYSCTL(sched_latency);
	WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

586 587 588
	return 0;
}
#endif
589

590
/*
591
 * delta /= w
592 593 594 595
 */
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
596 597
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
598 599 600 601

	return delta;
}

602 603 604
/*
 * The idea is to set a period in which each task runs once.
 *
605
 * When there are too many tasks (sched_nr_latency) we have to stretch
606 607 608 609
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
610 611 612
static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
613
	unsigned long nr_latency = sched_nr_latency;
614 615

	if (unlikely(nr_running > nr_latency)) {
616
		period = sysctl_sched_min_granularity;
617 618 619 620 621 622
		period *= nr_running;
	}

	return period;
}

623 624 625 626
/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
627
 * s = p*P[w/rw]
628
 */
P
Peter Zijlstra 已提交
629
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
630
{
M
Mike Galbraith 已提交
631
	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
632

M
Mike Galbraith 已提交
633
	for_each_sched_entity(se) {
L
Lin Ming 已提交
634
		struct load_weight *load;
635
		struct load_weight lw;
L
Lin Ming 已提交
636 637 638

		cfs_rq = cfs_rq_of(se);
		load = &cfs_rq->load;
639

M
Mike Galbraith 已提交
640
		if (unlikely(!se->on_rq)) {
641
			lw = cfs_rq->load;
M
Mike Galbraith 已提交
642 643 644 645 646 647 648

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = calc_delta_mine(slice, se->load.weight, load);
	}
	return slice;
649 650
}

651
/*
652
 * We calculate the vruntime slice of a to be inserted task
653
 *
654
 * vs = s/w
655
 */
656
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
P
Peter Zijlstra 已提交
657
{
658
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
659 660
}

661
static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
662
static void update_cfs_shares(struct cfs_rq *cfs_rq);
663

664 665 666 667 668
/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
I
Ingo Molnar 已提交
669 670
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
671
{
672
	unsigned long delta_exec_weighted;
673

674 675
	schedstat_set(curr->statistics.exec_max,
		      max((u64)delta_exec, curr->statistics.exec_max));
676 677

	curr->sum_exec_runtime += delta_exec;
678
	schedstat_add(cfs_rq, exec_clock, delta_exec);
679
	delta_exec_weighted = calc_delta_fair(delta_exec, curr);
680

I
Ingo Molnar 已提交
681
	curr->vruntime += delta_exec_weighted;
682
	update_min_vruntime(cfs_rq);
683

P
Peter Zijlstra 已提交
684
#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
685 686
	cfs_rq->load_unacc_exec_time += delta_exec;
#endif
687 688
}

689
static void update_curr(struct cfs_rq *cfs_rq)
690
{
691
	struct sched_entity *curr = cfs_rq->curr;
692
	u64 now = rq_of(cfs_rq)->clock_task;
693 694 695 696 697 698 699 700 701 702
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
I
Ingo Molnar 已提交
703
	delta_exec = (unsigned long)(now - curr->exec_start);
P
Peter Zijlstra 已提交
704 705
	if (!delta_exec)
		return;
706

I
Ingo Molnar 已提交
707 708
	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;
709 710 711 712

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

713
		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
714
		cpuacct_charge(curtask, delta_exec);
715
		account_group_exec_runtime(curtask, delta_exec);
716
	}
717 718

	account_cfs_rq_runtime(cfs_rq, delta_exec);
719 720 721
}

static inline void
722
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
723
{
724
	schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
725 726 727 728 729
}

/*
 * Task is being enqueued - update stats:
 */
730
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
731 732 733 734 735
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
736
	if (se != cfs_rq->curr)
737
		update_stats_wait_start(cfs_rq, se);
738 739 740
}

static void
741
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
742
{
743 744 745 746 747
	schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
			rq_of(cfs_rq)->clock - se->statistics.wait_start));
	schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
	schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
			rq_of(cfs_rq)->clock - se->statistics.wait_start);
748 749 750
#ifdef CONFIG_SCHEDSTATS
	if (entity_is_task(se)) {
		trace_sched_stat_wait(task_of(se),
751
			rq_of(cfs_rq)->clock - se->statistics.wait_start);
752 753
	}
#endif
754
	schedstat_set(se->statistics.wait_start, 0);
755 756 757
}

static inline void
758
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
759 760 761 762 763
{
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
764
	if (se != cfs_rq->curr)
765
		update_stats_wait_end(cfs_rq, se);
766 767 768 769 770 771
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
772
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
773 774 775 776
{
	/*
	 * We are starting a new run period:
	 */
777
	se->exec_start = rq_of(cfs_rq)->clock_task;
778 779 780 781 782 783
}

/**************************************************
 * Scheduling class queueing methods:
 */

784 785 786 787
static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_add(&cfs_rq->load, se->load.weight);
788
	if (!parent_entity(se))
789
		update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
790 791
#ifdef CONFIG_SMP
	if (entity_is_task(se))
792
		list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
793
#endif
794 795 796 797 798 799 800
	cfs_rq->nr_running++;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_sub(&cfs_rq->load, se->load.weight);
801
	if (!parent_entity(se))
802
		update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
803
	if (entity_is_task(se))
804
		list_del_init(&se->group_node);
805 806 807
	cfs_rq->nr_running--;
}

808
#ifdef CONFIG_FAIR_GROUP_SCHED
809 810
/* we need this in update_cfs_load and load-balance functions below */
static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
811
# ifdef CONFIG_SMP
812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827
static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
					    int global_update)
{
	struct task_group *tg = cfs_rq->tg;
	long load_avg;

	load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
	load_avg -= cfs_rq->load_contribution;

	if (global_update || abs(load_avg) > cfs_rq->load_contribution / 8) {
		atomic_add(load_avg, &tg->load_weight);
		cfs_rq->load_contribution += load_avg;
	}
}

static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
P
Peter Zijlstra 已提交
828
{
829
	u64 period = sysctl_sched_shares_window;
P
Peter Zijlstra 已提交
830
	u64 now, delta;
831
	unsigned long load = cfs_rq->load.weight;
P
Peter Zijlstra 已提交
832

833
	if (cfs_rq->tg == &root_task_group || throttled_hierarchy(cfs_rq))
P
Peter Zijlstra 已提交
834 835
		return;

836
	now = rq_of(cfs_rq)->clock_task;
P
Peter Zijlstra 已提交
837 838
	delta = now - cfs_rq->load_stamp;

839 840 841 842 843
	/* truncate load history at 4 idle periods */
	if (cfs_rq->load_stamp > cfs_rq->load_last &&
	    now - cfs_rq->load_last > 4 * period) {
		cfs_rq->load_period = 0;
		cfs_rq->load_avg = 0;
844
		delta = period - 1;
845 846
	}

P
Peter Zijlstra 已提交
847
	cfs_rq->load_stamp = now;
848
	cfs_rq->load_unacc_exec_time = 0;
P
Peter Zijlstra 已提交
849
	cfs_rq->load_period += delta;
850 851 852 853
	if (load) {
		cfs_rq->load_last = now;
		cfs_rq->load_avg += delta * load;
	}
P
Peter Zijlstra 已提交
854

855 856 857 858 859
	/* consider updating load contribution on each fold or truncate */
	if (global_update || cfs_rq->load_period > period
	    || !cfs_rq->load_period)
		update_cfs_rq_load_contribution(cfs_rq, global_update);

P
Peter Zijlstra 已提交
860 861 862 863 864 865 866 867 868 869
	while (cfs_rq->load_period > period) {
		/*
		 * Inline assembly required to prevent the compiler
		 * optimising this loop into a divmod call.
		 * See __iter_div_u64_rem() for another example of this.
		 */
		asm("" : "+rm" (cfs_rq->load_period));
		cfs_rq->load_period /= 2;
		cfs_rq->load_avg /= 2;
	}
870

871 872
	if (!cfs_rq->curr && !cfs_rq->nr_running && !cfs_rq->load_avg)
		list_del_leaf_cfs_rq(cfs_rq);
P
Peter Zijlstra 已提交
873 874
}

875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890
static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
{
	long tg_weight;

	/*
	 * Use this CPU's actual weight instead of the last load_contribution
	 * to gain a more accurate current total weight. See
	 * update_cfs_rq_load_contribution().
	 */
	tg_weight = atomic_read(&tg->load_weight);
	tg_weight -= cfs_rq->load_contribution;
	tg_weight += cfs_rq->load.weight;

	return tg_weight;
}

891
static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
892
{
893
	long tg_weight, load, shares;
894

895
	tg_weight = calc_tg_weight(tg, cfs_rq);
896
	load = cfs_rq->load.weight;
897 898

	shares = (tg->shares * load);
899 900
	if (tg_weight)
		shares /= tg_weight;
901 902 903 904 905 906 907 908 909 910 911 912 913

	if (shares < MIN_SHARES)
		shares = MIN_SHARES;
	if (shares > tg->shares)
		shares = tg->shares;

	return shares;
}

static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
		update_cfs_load(cfs_rq, 0);
914
		update_cfs_shares(cfs_rq);
915 916 917 918 919 920 921
	}
}
# else /* CONFIG_SMP */
static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
{
}

922
static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
923 924 925 926 927 928 929 930
{
	return tg->shares;
}

static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
{
}
# endif /* CONFIG_SMP */
P
Peter Zijlstra 已提交
931 932 933
static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
			    unsigned long weight)
{
934 935 936 937
	if (se->on_rq) {
		/* commit outstanding execution time */
		if (cfs_rq->curr == se)
			update_curr(cfs_rq);
P
Peter Zijlstra 已提交
938
		account_entity_dequeue(cfs_rq, se);
939
	}
P
Peter Zijlstra 已提交
940 941 942 943 944 945 946

	update_load_set(&se->load, weight);

	if (se->on_rq)
		account_entity_enqueue(cfs_rq, se);
}

947
static void update_cfs_shares(struct cfs_rq *cfs_rq)
P
Peter Zijlstra 已提交
948 949 950
{
	struct task_group *tg;
	struct sched_entity *se;
951
	long shares;
P
Peter Zijlstra 已提交
952 953 954

	tg = cfs_rq->tg;
	se = tg->se[cpu_of(rq_of(cfs_rq))];
955
	if (!se || throttled_hierarchy(cfs_rq))
P
Peter Zijlstra 已提交
956
		return;
957 958 959 960
#ifndef CONFIG_SMP
	if (likely(se->load.weight == tg->shares))
		return;
#endif
961
	shares = calc_cfs_shares(cfs_rq, tg);
P
Peter Zijlstra 已提交
962 963 964 965

	reweight_entity(cfs_rq_of(se), se, shares);
}
#else /* CONFIG_FAIR_GROUP_SCHED */
966
static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
P
Peter Zijlstra 已提交
967 968 969
{
}

970
static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
P
Peter Zijlstra 已提交
971 972
{
}
973 974 975 976

static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
{
}
P
Peter Zijlstra 已提交
977 978
#endif /* CONFIG_FAIR_GROUP_SCHED */

979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088
#ifdef CONFIG_SMP
/*
 * Approximate:
 *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
 */
static __always_inline u64 decay_load(u64 val, u64 n)
{
	for (; n && val; n--) {
		val *= 4008;
		val >>= 12;
	}

	return val;
}

/*
 * We can represent the historical contribution to runnable average as the
 * coefficients of a geometric series.  To do this we sub-divide our runnable
 * history into segments of approximately 1ms (1024us); label the segment that
 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
 *
 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
 *      p0            p1           p2
 *     (now)       (~1ms ago)  (~2ms ago)
 *
 * Let u_i denote the fraction of p_i that the entity was runnable.
 *
 * We then designate the fractions u_i as our co-efficients, yielding the
 * following representation of historical load:
 *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
 *
 * We choose y based on the with of a reasonably scheduling period, fixing:
 *   y^32 = 0.5
 *
 * This means that the contribution to load ~32ms ago (u_32) will be weighted
 * approximately half as much as the contribution to load within the last ms
 * (u_0).
 *
 * When a period "rolls over" and we have new u_0`, multiplying the previous
 * sum again by y is sufficient to update:
 *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
 *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
 */
static __always_inline int __update_entity_runnable_avg(u64 now,
							struct sched_avg *sa,
							int runnable)
{
	u64 delta;
	int delta_w, decayed = 0;

	delta = now - sa->last_runnable_update;
	/*
	 * This should only happen when time goes backwards, which it
	 * unfortunately does during sched clock init when we swap over to TSC.
	 */
	if ((s64)delta < 0) {
		sa->last_runnable_update = now;
		return 0;
	}

	/*
	 * Use 1024ns as the unit of measurement since it's a reasonable
	 * approximation of 1us and fast to compute.
	 */
	delta >>= 10;
	if (!delta)
		return 0;
	sa->last_runnable_update = now;

	/* delta_w is the amount already accumulated against our next period */
	delta_w = sa->runnable_avg_period % 1024;
	if (delta + delta_w >= 1024) {
		/* period roll-over */
		decayed = 1;

		/*
		 * Now that we know we're crossing a period boundary, figure
		 * out how much from delta we need to complete the current
		 * period and accrue it.
		 */
		delta_w = 1024 - delta_w;
		BUG_ON(delta_w > delta);
		do {
			if (runnable)
				sa->runnable_avg_sum += delta_w;
			sa->runnable_avg_period += delta_w;

			/*
			 * Remainder of delta initiates a new period, roll over
			 * the previous.
			 */
			sa->runnable_avg_sum =
				decay_load(sa->runnable_avg_sum, 1);
			sa->runnable_avg_period =
				decay_load(sa->runnable_avg_period, 1);

			delta -= delta_w;
			/* New period is empty */
			delta_w = 1024;
		} while (delta >= 1024);
	}

	/* Remainder of delta accrued against u_0` */
	if (runnable)
		sa->runnable_avg_sum += delta;
	sa->runnable_avg_period += delta;

	return decayed;
}

1089
/* Synchronize an entity's decay with its parenting cfs_rq.*/
1090
static inline u64 __synchronize_entity_decay(struct sched_entity *se)
1091 1092 1093 1094 1095 1096
{
	struct cfs_rq *cfs_rq = cfs_rq_of(se);
	u64 decays = atomic64_read(&cfs_rq->decay_counter);

	decays -= se->avg.decay_count;
	if (!decays)
1097
		return 0;
1098 1099 1100

	se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
	se->avg.decay_count = 0;
1101 1102

	return decays;
1103 1104
}

1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119
#ifdef CONFIG_FAIR_GROUP_SCHED
static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
						 int force_update)
{
	struct task_group *tg = cfs_rq->tg;
	s64 tg_contrib;

	tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
	tg_contrib -= cfs_rq->tg_load_contrib;

	if (force_update || abs64(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
		atomic64_add(tg_contrib, &tg->load_avg);
		cfs_rq->tg_load_contrib += tg_contrib;
	}
}
1120

1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141
/*
 * Aggregate cfs_rq runnable averages into an equivalent task_group
 * representation for computing load contributions.
 */
static inline void __update_tg_runnable_avg(struct sched_avg *sa,
						  struct cfs_rq *cfs_rq)
{
	struct task_group *tg = cfs_rq->tg;
	long contrib;

	/* The fraction of a cpu used by this cfs_rq */
	contrib = div_u64(sa->runnable_avg_sum << NICE_0_SHIFT,
			  sa->runnable_avg_period + 1);
	contrib -= cfs_rq->tg_runnable_contrib;

	if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
		atomic_add(contrib, &tg->runnable_avg);
		cfs_rq->tg_runnable_contrib += contrib;
	}
}

1142 1143 1144 1145
static inline void __update_group_entity_contrib(struct sched_entity *se)
{
	struct cfs_rq *cfs_rq = group_cfs_rq(se);
	struct task_group *tg = cfs_rq->tg;
1146 1147
	int runnable_avg;

1148 1149 1150 1151 1152
	u64 contrib;

	contrib = cfs_rq->tg_load_contrib * tg->shares;
	se->avg.load_avg_contrib = div64_u64(contrib,
					     atomic64_read(&tg->load_avg) + 1);
1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181

	/*
	 * For group entities we need to compute a correction term in the case
	 * that they are consuming <1 cpu so that we would contribute the same
	 * load as a task of equal weight.
	 *
	 * Explicitly co-ordinating this measurement would be expensive, but
	 * fortunately the sum of each cpus contribution forms a usable
	 * lower-bound on the true value.
	 *
	 * Consider the aggregate of 2 contributions.  Either they are disjoint
	 * (and the sum represents true value) or they are disjoint and we are
	 * understating by the aggregate of their overlap.
	 *
	 * Extending this to N cpus, for a given overlap, the maximum amount we
	 * understand is then n_i(n_i+1)/2 * w_i where n_i is the number of
	 * cpus that overlap for this interval and w_i is the interval width.
	 *
	 * On a small machine; the first term is well-bounded which bounds the
	 * total error since w_i is a subset of the period.  Whereas on a
	 * larger machine, while this first term can be larger, if w_i is the
	 * of consequential size guaranteed to see n_i*w_i quickly converge to
	 * our upper bound of 1-cpu.
	 */
	runnable_avg = atomic_read(&tg->runnable_avg);
	if (runnable_avg < NICE_0_LOAD) {
		se->avg.load_avg_contrib *= runnable_avg;
		se->avg.load_avg_contrib >>= NICE_0_SHIFT;
	}
1182
}
1183 1184 1185
#else
static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
						 int force_update) {}
1186 1187
static inline void __update_tg_runnable_avg(struct sched_avg *sa,
						  struct cfs_rq *cfs_rq) {}
1188
static inline void __update_group_entity_contrib(struct sched_entity *se) {}
1189 1190
#endif

1191 1192 1193 1194 1195 1196 1197 1198 1199 1200
static inline void __update_task_entity_contrib(struct sched_entity *se)
{
	u32 contrib;

	/* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
	contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
	contrib /= (se->avg.runnable_avg_period + 1);
	se->avg.load_avg_contrib = scale_load(contrib);
}

1201 1202 1203 1204 1205
/* Compute the current contribution to load_avg by se, return any delta */
static long __update_entity_load_avg_contrib(struct sched_entity *se)
{
	long old_contrib = se->avg.load_avg_contrib;

1206 1207 1208
	if (entity_is_task(se)) {
		__update_task_entity_contrib(se);
	} else {
1209
		__update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
1210 1211
		__update_group_entity_contrib(se);
	}
1212 1213 1214 1215

	return se->avg.load_avg_contrib - old_contrib;
}

1216 1217 1218 1219 1220 1221 1222 1223 1224
static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
						 long load_contrib)
{
	if (likely(load_contrib < cfs_rq->blocked_load_avg))
		cfs_rq->blocked_load_avg -= load_contrib;
	else
		cfs_rq->blocked_load_avg = 0;
}

1225 1226
static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);

1227
/* Update a sched_entity's runnable average */
1228 1229
static inline void update_entity_load_avg(struct sched_entity *se,
					  int update_cfs_rq)
1230
{
1231 1232
	struct cfs_rq *cfs_rq = cfs_rq_of(se);
	long contrib_delta;
1233
	u64 now;
1234

1235 1236 1237 1238 1239 1240 1241 1242 1243 1244
	/*
	 * For a group entity we need to use their owned cfs_rq_clock_task() in
	 * case they are the parent of a throttled hierarchy.
	 */
	if (entity_is_task(se))
		now = cfs_rq_clock_task(cfs_rq);
	else
		now = cfs_rq_clock_task(group_cfs_rq(se));

	if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
1245 1246 1247
		return;

	contrib_delta = __update_entity_load_avg_contrib(se);
1248 1249 1250 1251

	if (!update_cfs_rq)
		return;

1252 1253
	if (se->on_rq)
		cfs_rq->runnable_load_avg += contrib_delta;
1254 1255 1256 1257 1258 1259 1260 1261
	else
		subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
}

/*
 * Decay the load contributed by all blocked children and account this so that
 * their contribution may appropriately discounted when they wake up.
 */
1262
static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
1263
{
1264
	u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
1265 1266 1267
	u64 decays;

	decays = now - cfs_rq->last_decay;
1268
	if (!decays && !force_update)
1269 1270
		return;

1271 1272 1273 1274
	if (atomic64_read(&cfs_rq->removed_load)) {
		u64 removed_load = atomic64_xchg(&cfs_rq->removed_load, 0);
		subtract_blocked_load_contrib(cfs_rq, removed_load);
	}
1275

1276 1277 1278 1279 1280 1281
	if (decays) {
		cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
						      decays);
		atomic64_add(decays, &cfs_rq->decay_counter);
		cfs_rq->last_decay = now;
	}
1282 1283

	__update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
1284
}
1285 1286 1287 1288

static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
{
	__update_entity_runnable_avg(rq->clock_task, &rq->avg, runnable);
1289
	__update_tg_runnable_avg(&rq->avg, &rq->cfs);
1290
}
1291 1292 1293

/* Add the load generated by se into cfs_rq's child load-average */
static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
1294 1295
						  struct sched_entity *se,
						  int wakeup)
1296
{
1297 1298 1299 1300 1301 1302
	/*
	 * We track migrations using entity decay_count <= 0, on a wake-up
	 * migration we use a negative decay count to track the remote decays
	 * accumulated while sleeping.
	 */
	if (unlikely(se->avg.decay_count <= 0)) {
1303
		se->avg.last_runnable_update = rq_of(cfs_rq)->clock_task;
1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318
		if (se->avg.decay_count) {
			/*
			 * In a wake-up migration we have to approximate the
			 * time sleeping.  This is because we can't synchronize
			 * clock_task between the two cpus, and it is not
			 * guaranteed to be read-safe.  Instead, we can
			 * approximate this using our carried decays, which are
			 * explicitly atomically readable.
			 */
			se->avg.last_runnable_update -= (-se->avg.decay_count)
							<< 20;
			update_entity_load_avg(se, 0);
			/* Indicate that we're now synchronized and on-rq */
			se->avg.decay_count = 0;
		}
1319 1320 1321 1322 1323
		wakeup = 0;
	} else {
		__synchronize_entity_decay(se);
	}

1324 1325
	/* migrated tasks did not contribute to our blocked load */
	if (wakeup) {
1326
		subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
1327 1328
		update_entity_load_avg(se, 0);
	}
1329

1330
	cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
1331 1332
	/* we force update consideration on load-balancer moves */
	update_cfs_rq_blocked_load(cfs_rq, !wakeup);
1333 1334
}

1335 1336 1337 1338 1339
/*
 * Remove se's load from this cfs_rq child load-average, if the entity is
 * transitioning to a blocked state we track its projected decay using
 * blocked_load_avg.
 */
1340
static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
1341 1342
						  struct sched_entity *se,
						  int sleep)
1343
{
1344
	update_entity_load_avg(se, 1);
1345 1346
	/* we force update consideration on load-balancer moves */
	update_cfs_rq_blocked_load(cfs_rq, !sleep);
1347

1348
	cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
1349 1350 1351 1352
	if (sleep) {
		cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
		se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
	} /* migrations, e.g. sleep=0 leave decay_count == 0 */
1353
}
1354
#else
1355 1356
static inline void update_entity_load_avg(struct sched_entity *se,
					  int update_cfs_rq) {}
1357
static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
1358
static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
1359 1360
					   struct sched_entity *se,
					   int wakeup) {}
1361
static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
1362 1363
					   struct sched_entity *se,
					   int sleep) {}
1364 1365
static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
					      int force_update) {}
1366 1367
#endif

1368
static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
1369 1370
{
#ifdef CONFIG_SCHEDSTATS
1371 1372 1373 1374 1375
	struct task_struct *tsk = NULL;

	if (entity_is_task(se))
		tsk = task_of(se);

1376 1377
	if (se->statistics.sleep_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start;
1378 1379 1380 1381

		if ((s64)delta < 0)
			delta = 0;

1382 1383
		if (unlikely(delta > se->statistics.sleep_max))
			se->statistics.sleep_max = delta;
1384

1385
		se->statistics.sleep_start = 0;
1386
		se->statistics.sum_sleep_runtime += delta;
A
Arjan van de Ven 已提交
1387

1388
		if (tsk) {
1389
			account_scheduler_latency(tsk, delta >> 10, 1);
1390 1391
			trace_sched_stat_sleep(tsk, delta);
		}
1392
	}
1393 1394
	if (se->statistics.block_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start;
1395 1396 1397 1398

		if ((s64)delta < 0)
			delta = 0;

1399 1400
		if (unlikely(delta > se->statistics.block_max))
			se->statistics.block_max = delta;
1401

1402
		se->statistics.block_start = 0;
1403
		se->statistics.sum_sleep_runtime += delta;
I
Ingo Molnar 已提交
1404

1405
		if (tsk) {
1406
			if (tsk->in_iowait) {
1407 1408
				se->statistics.iowait_sum += delta;
				se->statistics.iowait_count++;
1409
				trace_sched_stat_iowait(tsk, delta);
1410 1411
			}

1412 1413
			trace_sched_stat_blocked(tsk, delta);

1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424
			/*
			 * Blocking time is in units of nanosecs, so shift by
			 * 20 to get a milliseconds-range estimation of the
			 * amount of time that the task spent sleeping:
			 */
			if (unlikely(prof_on == SLEEP_PROFILING)) {
				profile_hits(SLEEP_PROFILING,
						(void *)get_wchan(tsk),
						delta >> 20);
			}
			account_scheduler_latency(tsk, delta >> 10, 0);
I
Ingo Molnar 已提交
1425
		}
1426 1427 1428 1429
	}
#endif
}

P
Peter Zijlstra 已提交
1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442
static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	s64 d = se->vruntime - cfs_rq->min_vruntime;

	if (d < 0)
		d = -d;

	if (d > 3*sysctl_sched_latency)
		schedstat_inc(cfs_rq, nr_spread_over);
#endif
}

1443 1444 1445
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
1446
	u64 vruntime = cfs_rq->min_vruntime;
P
Peter Zijlstra 已提交
1447

1448 1449 1450 1451 1452 1453
	/*
	 * The 'current' period is already promised to the current tasks,
	 * however the extra weight of the new task will slow them down a
	 * little, place the new task so that it fits in the slot that
	 * stays open at the end.
	 */
P
Peter Zijlstra 已提交
1454
	if (initial && sched_feat(START_DEBIT))
1455
		vruntime += sched_vslice(cfs_rq, se);
1456

1457
	/* sleeps up to a single latency don't count. */
1458
	if (!initial) {
1459
		unsigned long thresh = sysctl_sched_latency;
1460

1461 1462 1463 1464 1465 1466
		/*
		 * Halve their sleep time's effect, to allow
		 * for a gentler effect of sleepers:
		 */
		if (sched_feat(GENTLE_FAIR_SLEEPERS))
			thresh >>= 1;
1467

1468
		vruntime -= thresh;
1469 1470
	}

1471 1472 1473
	/* ensure we never gain time by being placed backwards. */
	vruntime = max_vruntime(se->vruntime, vruntime);

P
Peter Zijlstra 已提交
1474
	se->vruntime = vruntime;
1475 1476
}

1477 1478
static void check_enqueue_throttle(struct cfs_rq *cfs_rq);

1479
static void
1480
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1481
{
1482 1483 1484 1485
	/*
	 * Update the normalized vruntime before updating min_vruntime
	 * through callig update_curr().
	 */
1486
	if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
1487 1488
		se->vruntime += cfs_rq->min_vruntime;

1489
	/*
1490
	 * Update run-time statistics of the 'current'.
1491
	 */
1492
	update_curr(cfs_rq);
1493
	update_cfs_load(cfs_rq, 0);
1494
	enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
P
Peter Zijlstra 已提交
1495
	account_entity_enqueue(cfs_rq, se);
1496
	update_cfs_shares(cfs_rq);
1497

1498
	if (flags & ENQUEUE_WAKEUP) {
1499
		place_entity(cfs_rq, se, 0);
1500
		enqueue_sleeper(cfs_rq, se);
I
Ingo Molnar 已提交
1501
	}
1502

1503
	update_stats_enqueue(cfs_rq, se);
P
Peter Zijlstra 已提交
1504
	check_spread(cfs_rq, se);
1505 1506
	if (se != cfs_rq->curr)
		__enqueue_entity(cfs_rq, se);
P
Peter Zijlstra 已提交
1507
	se->on_rq = 1;
1508

1509
	if (cfs_rq->nr_running == 1) {
1510
		list_add_leaf_cfs_rq(cfs_rq);
1511 1512
		check_enqueue_throttle(cfs_rq);
	}
1513 1514
}

1515
static void __clear_buddies_last(struct sched_entity *se)
P
Peter Zijlstra 已提交
1516
{
1517 1518 1519 1520 1521 1522 1523 1524
	for_each_sched_entity(se) {
		struct cfs_rq *cfs_rq = cfs_rq_of(se);
		if (cfs_rq->last == se)
			cfs_rq->last = NULL;
		else
			break;
	}
}
P
Peter Zijlstra 已提交
1525

1526 1527 1528 1529 1530 1531 1532 1533 1534
static void __clear_buddies_next(struct sched_entity *se)
{
	for_each_sched_entity(se) {
		struct cfs_rq *cfs_rq = cfs_rq_of(se);
		if (cfs_rq->next == se)
			cfs_rq->next = NULL;
		else
			break;
	}
P
Peter Zijlstra 已提交
1535 1536
}

1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547
static void __clear_buddies_skip(struct sched_entity *se)
{
	for_each_sched_entity(se) {
		struct cfs_rq *cfs_rq = cfs_rq_of(se);
		if (cfs_rq->skip == se)
			cfs_rq->skip = NULL;
		else
			break;
	}
}

P
Peter Zijlstra 已提交
1548 1549
static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
1550 1551 1552 1553 1554
	if (cfs_rq->last == se)
		__clear_buddies_last(se);

	if (cfs_rq->next == se)
		__clear_buddies_next(se);
1555 1556 1557

	if (cfs_rq->skip == se)
		__clear_buddies_skip(se);
P
Peter Zijlstra 已提交
1558 1559
}

1560
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
1561

1562
static void
1563
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1564
{
1565 1566 1567 1568
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);
1569
	dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
1570

1571
	update_stats_dequeue(cfs_rq, se);
1572
	if (flags & DEQUEUE_SLEEP) {
P
Peter Zijlstra 已提交
1573
#ifdef CONFIG_SCHEDSTATS
1574 1575 1576 1577
		if (entity_is_task(se)) {
			struct task_struct *tsk = task_of(se);

			if (tsk->state & TASK_INTERRUPTIBLE)
1578
				se->statistics.sleep_start = rq_of(cfs_rq)->clock;
1579
			if (tsk->state & TASK_UNINTERRUPTIBLE)
1580
				se->statistics.block_start = rq_of(cfs_rq)->clock;
1581
		}
1582
#endif
P
Peter Zijlstra 已提交
1583 1584
	}

P
Peter Zijlstra 已提交
1585
	clear_buddies(cfs_rq, se);
P
Peter Zijlstra 已提交
1586

1587
	if (se != cfs_rq->curr)
1588
		__dequeue_entity(cfs_rq, se);
P
Peter Zijlstra 已提交
1589
	se->on_rq = 0;
1590
	update_cfs_load(cfs_rq, 0);
1591
	account_entity_dequeue(cfs_rq, se);
1592 1593 1594 1595 1596 1597

	/*
	 * Normalize the entity after updating the min_vruntime because the
	 * update can refer to the ->curr item and we need to reflect this
	 * movement in our normalized position.
	 */
1598
	if (!(flags & DEQUEUE_SLEEP))
1599
		se->vruntime -= cfs_rq->min_vruntime;
1600

1601 1602 1603
	/* return excess runtime on last dequeue */
	return_cfs_rq_runtime(cfs_rq);

1604 1605
	update_min_vruntime(cfs_rq);
	update_cfs_shares(cfs_rq);
1606 1607 1608 1609 1610
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
1611
static void
I
Ingo Molnar 已提交
1612
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
1613
{
1614
	unsigned long ideal_runtime, delta_exec;
1615 1616
	struct sched_entity *se;
	s64 delta;
1617

P
Peter Zijlstra 已提交
1618
	ideal_runtime = sched_slice(cfs_rq, curr);
1619
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
1620
	if (delta_exec > ideal_runtime) {
1621
		resched_task(rq_of(cfs_rq)->curr);
1622 1623 1624 1625 1626
		/*
		 * The current task ran long enough, ensure it doesn't get
		 * re-elected due to buddy favours.
		 */
		clear_buddies(cfs_rq, curr);
1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637
		return;
	}

	/*
	 * Ensure that a task that missed wakeup preemption by a
	 * narrow margin doesn't have to wait for a full slice.
	 * This also mitigates buddy induced latencies under load.
	 */
	if (delta_exec < sysctl_sched_min_granularity)
		return;

1638 1639
	se = __pick_first_entity(cfs_rq);
	delta = curr->vruntime - se->vruntime;
1640

1641 1642
	if (delta < 0)
		return;
1643

1644 1645
	if (delta > ideal_runtime)
		resched_task(rq_of(cfs_rq)->curr);
1646 1647
}

1648
static void
1649
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
1650
{
1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661
	/* 'current' is not kept within the tree. */
	if (se->on_rq) {
		/*
		 * Any task has to be enqueued before it get to execute on
		 * a CPU. So account for the time it spent waiting on the
		 * runqueue.
		 */
		update_stats_wait_end(cfs_rq, se);
		__dequeue_entity(cfs_rq, se);
	}

1662
	update_stats_curr_start(cfs_rq, se);
1663
	cfs_rq->curr = se;
I
Ingo Molnar 已提交
1664 1665 1666 1667 1668 1669
#ifdef CONFIG_SCHEDSTATS
	/*
	 * Track our maximum slice length, if the CPU's load is at
	 * least twice that of our own weight (i.e. dont track it
	 * when there are only lesser-weight tasks around):
	 */
1670
	if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
1671
		se->statistics.slice_max = max(se->statistics.slice_max,
I
Ingo Molnar 已提交
1672 1673 1674
			se->sum_exec_runtime - se->prev_sum_exec_runtime);
	}
#endif
1675
	se->prev_sum_exec_runtime = se->sum_exec_runtime;
1676 1677
}

1678 1679 1680
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);

1681 1682 1683 1684 1685 1686 1687
/*
 * Pick the next process, keeping these things in mind, in this order:
 * 1) keep things fair between processes/task groups
 * 2) pick the "next" process, since someone really wants that to run
 * 3) pick the "last" process, for cache locality
 * 4) do not run the "skip" process, if something else is available
 */
1688
static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
1689
{
1690
	struct sched_entity *se = __pick_first_entity(cfs_rq);
1691
	struct sched_entity *left = se;
1692

1693 1694 1695 1696 1697 1698 1699 1700 1701
	/*
	 * Avoid running the skip buddy, if running something else can
	 * be done without getting too unfair.
	 */
	if (cfs_rq->skip == se) {
		struct sched_entity *second = __pick_next_entity(se);
		if (second && wakeup_preempt_entity(second, left) < 1)
			se = second;
	}
1702

1703 1704 1705 1706 1707 1708
	/*
	 * Prefer last buddy, try to return the CPU to a preempted task.
	 */
	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
		se = cfs_rq->last;

1709 1710 1711 1712 1713 1714
	/*
	 * Someone really wants this to run. If it's not unfair, run it.
	 */
	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
		se = cfs_rq->next;

1715
	clear_buddies(cfs_rq, se);
P
Peter Zijlstra 已提交
1716 1717

	return se;
1718 1719
}

1720 1721
static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);

1722
static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
1723 1724 1725 1726 1727 1728
{
	/*
	 * If still on the runqueue then deactivate_task()
	 * was not called and update_curr() has to be done:
	 */
	if (prev->on_rq)
1729
		update_curr(cfs_rq);
1730

1731 1732 1733
	/* throttle cfs_rqs exceeding runtime */
	check_cfs_rq_runtime(cfs_rq);

P
Peter Zijlstra 已提交
1734
	check_spread(cfs_rq, prev);
1735
	if (prev->on_rq) {
1736
		update_stats_wait_start(cfs_rq, prev);
1737 1738
		/* Put 'current' back into the tree. */
		__enqueue_entity(cfs_rq, prev);
1739
		/* in !on_rq case, update occurred at dequeue */
1740
		update_entity_load_avg(prev, 1);
1741
	}
1742
	cfs_rq->curr = NULL;
1743 1744
}

P
Peter Zijlstra 已提交
1745 1746
static void
entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
1747 1748
{
	/*
1749
	 * Update run-time statistics of the 'current'.
1750
	 */
1751
	update_curr(cfs_rq);
1752

1753 1754 1755
	/*
	 * Ensure that runnable average is periodically updated.
	 */
1756
	update_entity_load_avg(curr, 1);
1757
	update_cfs_rq_blocked_load(cfs_rq, 1);
1758

1759 1760 1761 1762 1763
	/*
	 * Update share accounting for long-running entities.
	 */
	update_entity_shares_tick(cfs_rq);

P
Peter Zijlstra 已提交
1764 1765 1766 1767 1768
#ifdef CONFIG_SCHED_HRTICK
	/*
	 * queued ticks are scheduled to match the slice, so don't bother
	 * validating it and just reschedule.
	 */
1769 1770 1771 1772
	if (queued) {
		resched_task(rq_of(cfs_rq)->curr);
		return;
	}
P
Peter Zijlstra 已提交
1773 1774 1775 1776 1777 1778 1779 1780
	/*
	 * don't let the period tick interfere with the hrtick preemption
	 */
	if (!sched_feat(DOUBLE_TICK) &&
			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
		return;
#endif

Y
Yong Zhang 已提交
1781
	if (cfs_rq->nr_running > 1)
I
Ingo Molnar 已提交
1782
		check_preempt_tick(cfs_rq, curr);
1783 1784
}

1785 1786 1787 1788 1789 1790

/**************************************************
 * CFS bandwidth control machinery
 */

#ifdef CONFIG_CFS_BANDWIDTH
1791 1792

#ifdef HAVE_JUMP_LABEL
1793
static struct static_key __cfs_bandwidth_used;
1794 1795 1796

static inline bool cfs_bandwidth_used(void)
{
1797
	return static_key_false(&__cfs_bandwidth_used);
1798 1799 1800 1801 1802 1803
}

void account_cfs_bandwidth_used(int enabled, int was_enabled)
{
	/* only need to count groups transitioning between enabled/!enabled */
	if (enabled && !was_enabled)
1804
		static_key_slow_inc(&__cfs_bandwidth_used);
1805
	else if (!enabled && was_enabled)
1806
		static_key_slow_dec(&__cfs_bandwidth_used);
1807 1808 1809 1810 1811 1812 1813 1814 1815 1816
}
#else /* HAVE_JUMP_LABEL */
static bool cfs_bandwidth_used(void)
{
	return true;
}

void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
#endif /* HAVE_JUMP_LABEL */

1817 1818 1819 1820 1821 1822 1823 1824
/*
 * default period for cfs group bandwidth.
 * default: 0.1s, units: nanoseconds
 */
static inline u64 default_cfs_period(void)
{
	return 100000000ULL;
}
1825 1826 1827 1828 1829 1830

static inline u64 sched_cfs_bandwidth_slice(void)
{
	return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
}

/*
 * Replenish runtime according to assigned quota and update expiration time.
 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
 * additional synchronization around rq->lock.
 *
 * requires cfs_b->lock
 */
void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
{
	u64 now;

	if (cfs_b->quota == RUNTIME_INF)
		return;

	now = sched_clock_cpu(smp_processor_id());
	cfs_b->runtime = cfs_b->quota;
	cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
}

static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
{
	return &tg->cfs_bandwidth;
}

/* rq->clock_task normalized against any time this cfs_rq has spent throttled */
static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
{
	if (unlikely(cfs_rq->throttle_count))
		return cfs_rq->throttled_clock_task;

	return rq_of(cfs_rq)->clock_task - cfs_rq->throttled_clock_task_time;
}

/* returns 0 on failure to allocate runtime */
static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
1866 1867 1868
{
	struct task_group *tg = cfs_rq->tg;
	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
	u64 amount = 0, min_amount, expires;

	/* note: this is a positive sum as runtime_remaining <= 0 */
	min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;

	raw_spin_lock(&cfs_b->lock);
	if (cfs_b->quota == RUNTIME_INF)
		amount = min_amount;
	else {
		/*
		 * If the bandwidth pool has become inactive, then at least one
		 * period must have elapsed since the last consumption.
		 * Refresh the global state and ensure bandwidth timer becomes
		 * active.
		 */
		if (!cfs_b->timer_active) {
			__refill_cfs_bandwidth_runtime(cfs_b);
			__start_cfs_bandwidth(cfs_b);
		}

		if (cfs_b->runtime > 0) {
			amount = min(cfs_b->runtime, min_amount);
			cfs_b->runtime -= amount;
			cfs_b->idle = 0;
		}
	}
	expires = cfs_b->runtime_expires;
	raw_spin_unlock(&cfs_b->lock);

	cfs_rq->runtime_remaining += amount;
	/*
	 * we may have advanced our local expiration to account for allowed
	 * spread between our sched_clock and the one on which runtime was
	 * issued.
	 */
	if ((s64)(expires - cfs_rq->runtime_expires) > 0)
		cfs_rq->runtime_expires = expires;

	return cfs_rq->runtime_remaining > 0;
}

/*
 * Note: This depends on the synchronization provided by sched_clock and the
 * fact that rq->clock snapshots this value.
 */
static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
	struct rq *rq = rq_of(cfs_rq);

	/* if the deadline is ahead of our clock, nothing to do */
	if (likely((s64)(rq->clock - cfs_rq->runtime_expires) < 0))
		return;

	if (cfs_rq->runtime_remaining < 0)
		return;

	/*
	 * If the local deadline has passed we have to consider the
	 * possibility that our sched_clock is 'fast' and the global deadline
	 * has not truly expired.
	 *
	 * Fortunately we can determine whether this is the case by checking
	 * whether the global deadline has advanced.
	 */

	if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
		/* extend local deadline, drift is bounded above by 2 ticks */
		cfs_rq->runtime_expires += TICK_NSEC;
	} else {
		/* global deadline is ahead, expiration has passed */
		cfs_rq->runtime_remaining = 0;
	}
}

static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
				     unsigned long delta_exec)
{
	/* dock delta_exec before expiring quota (as it could span periods) */
	cfs_rq->runtime_remaining -= delta_exec;
	expire_cfs_rq_runtime(cfs_rq);

	if (likely(cfs_rq->runtime_remaining > 0))
		return;

	/*
	 * if we're unable to extend our runtime we resched so that the active
	 * hierarchy can be throttled
	 */
	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
		resched_task(rq_of(cfs_rq)->curr);
}

static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
{
	if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
		return;

	__account_cfs_rq_runtime(cfs_rq, delta_exec);
}

static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
{
	return cfs_bandwidth_used() && cfs_rq->throttled;
}

/* check whether cfs_rq, or any parent, is throttled */
static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
{
	return cfs_bandwidth_used() && cfs_rq->throttle_count;
}

/*
 * Ensure that neither of the group entities corresponding to src_cpu or
 * dest_cpu are members of a throttled hierarchy when performing group
 * load-balance operations.
 */
static inline int throttled_lb_pair(struct task_group *tg,
				    int src_cpu, int dest_cpu)
{
	struct cfs_rq *src_cfs_rq, *dest_cfs_rq;

	src_cfs_rq = tg->cfs_rq[src_cpu];
	dest_cfs_rq = tg->cfs_rq[dest_cpu];

	return throttled_hierarchy(src_cfs_rq) ||
	       throttled_hierarchy(dest_cfs_rq);
}

/* updated child weight may affect parent so we have to do this bottom up */
static int tg_unthrottle_up(struct task_group *tg, void *data)
{
	struct rq *rq = data;
	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];

	cfs_rq->throttle_count--;
#ifdef CONFIG_SMP
	if (!cfs_rq->throttle_count) {
		u64 delta = rq->clock_task - cfs_rq->load_stamp;

		/* leaving throttled state, advance shares averaging windows */
		cfs_rq->load_stamp += delta;
		cfs_rq->load_last += delta;

		/* adjust cfs_rq_clock_task() */
		cfs_rq->throttled_clock_task_time += rq->clock_task -
					     cfs_rq->throttled_clock_task;

		/* update entity weight now that we are on_rq again */
		update_cfs_shares(cfs_rq);
	}
#endif

	return 0;
}

static int tg_throttle_down(struct task_group *tg, void *data)
{
	struct rq *rq = data;
	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];

	/* group is entering throttled state, record last load */
	if (!cfs_rq->throttle_count) {
		update_cfs_load(cfs_rq, 0);
		cfs_rq->throttled_clock_task = rq->clock_task;
	}
	cfs_rq->throttle_count++;

	return 0;
}

static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
{
	struct rq *rq = rq_of(cfs_rq);
	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
	struct sched_entity *se;
	long task_delta, dequeue = 1;

	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];

	/* freeze hierarchy runnable averages while throttled */
	rcu_read_lock();
	walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
	rcu_read_unlock();

	task_delta = cfs_rq->h_nr_running;
	for_each_sched_entity(se) {
		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
		/* throttled entity or throttle-on-deactivate */
		if (!se->on_rq)
			break;

		if (dequeue)
			dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
		qcfs_rq->h_nr_running -= task_delta;

		if (qcfs_rq->load.weight)
			dequeue = 0;
	}

	if (!se)
		rq->nr_running -= task_delta;

	cfs_rq->throttled = 1;
	cfs_rq->throttled_clock = rq->clock;
	raw_spin_lock(&cfs_b->lock);
	list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
	raw_spin_unlock(&cfs_b->lock);
}

void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
{
	struct rq *rq = rq_of(cfs_rq);
	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
	struct sched_entity *se;
	int enqueue = 1;
	long task_delta;

	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];

	cfs_rq->throttled = 0;
	raw_spin_lock(&cfs_b->lock);
	cfs_b->throttled_time += rq->clock - cfs_rq->throttled_clock;
	list_del_rcu(&cfs_rq->throttled_list);
	raw_spin_unlock(&cfs_b->lock);

	update_rq_clock(rq);
	/* update hierarchical throttle state */
	walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);

	if (!cfs_rq->load.weight)
		return;

	task_delta = cfs_rq->h_nr_running;
	for_each_sched_entity(se) {
		if (se->on_rq)
			enqueue = 0;

		cfs_rq = cfs_rq_of(se);
		if (enqueue)
			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
		cfs_rq->h_nr_running += task_delta;

		if (cfs_rq_throttled(cfs_rq))
			break;
	}

	if (!se)
		rq->nr_running += task_delta;

	/* determine whether we need to wake up potentially idle cpu */
	if (rq->curr == rq->idle && rq->cfs.nr_running)
		resched_task(rq->curr);
}

static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
		u64 remaining, u64 expires)
{
	struct cfs_rq *cfs_rq;
	u64 runtime = remaining;

	rcu_read_lock();
	list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
				throttled_list) {
		struct rq *rq = rq_of(cfs_rq);

		raw_spin_lock(&rq->lock);
		if (!cfs_rq_throttled(cfs_rq))
			goto next;

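		/*
		 * runtime_remaining is <= 0 while throttled; hand out just
		 * enough runtime to bring it back above zero.
		 */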
		runtime = -cfs_rq->runtime_remaining + 1;
		if (runtime > remaining)
			runtime = remaining;
		remaining -= runtime;

		cfs_rq->runtime_remaining += runtime;
		cfs_rq->runtime_expires = expires;

		/* we check whether we're throttled above */
		if (cfs_rq->runtime_remaining > 0)
			unthrottle_cfs_rq(cfs_rq);

next:
		raw_spin_unlock(&rq->lock);

		if (!remaining)
			break;
	}
	rcu_read_unlock();

	return remaining;
}

/*
 * Responsible for refilling a task_group's bandwidth and unthrottling its
 * cfs_rqs as appropriate. If there has been no activity within the last
 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
 * used to track this state.
 */
static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
{
	u64 runtime, runtime_expires;
	int idle = 1, throttled;

	raw_spin_lock(&cfs_b->lock);
	/* no need to continue the timer with no bandwidth constraint */
	if (cfs_b->quota == RUNTIME_INF)
		goto out_unlock;

	throttled = !list_empty(&cfs_b->throttled_cfs_rq);
	/* idle depends on !throttled (for the case of a large deficit) */
	idle = cfs_b->idle && !throttled;
	cfs_b->nr_periods += overrun;

	/* if we're going inactive then everything else can be deferred */
	if (idle)
		goto out_unlock;

	__refill_cfs_bandwidth_runtime(cfs_b);

	if (!throttled) {
		/* mark as potentially idle for the upcoming period */
		cfs_b->idle = 1;
		goto out_unlock;
	}

	/* account preceding periods in which throttling occurred */
	cfs_b->nr_throttled += overrun;

	/*
	 * There are throttled entities so we must first use the new bandwidth
	 * to unthrottle them before making it generally available.  This
	 * ensures that all existing debts will be paid before a new cfs_rq is
	 * allowed to run.
	 */
	runtime = cfs_b->runtime;
	runtime_expires = cfs_b->runtime_expires;
	cfs_b->runtime = 0;

	/*
	 * This check is repeated as we are holding onto the new bandwidth
	 * while we unthrottle.  This can potentially race with an unthrottled
	 * group trying to acquire new bandwidth from the global pool.
	 */
	while (throttled && runtime > 0) {
		raw_spin_unlock(&cfs_b->lock);
		/* we can't nest cfs_b->lock while distributing bandwidth */
		runtime = distribute_cfs_runtime(cfs_b, runtime,
						 runtime_expires);
		raw_spin_lock(&cfs_b->lock);

		throttled = !list_empty(&cfs_b->throttled_cfs_rq);
	}

	/* return (any) remaining runtime */
	cfs_b->runtime = runtime;
	/*
	 * While we are ensured activity in the period following an
	 * unthrottle, this also covers the case in which the new bandwidth is
	 * insufficient to cover the existing bandwidth deficit.  (Forcing the
	 * timer to remain active while there are any throttled entities.)
	 */
	cfs_b->idle = 0;
out_unlock:
	if (idle)
		cfs_b->timer_active = 0;
	raw_spin_unlock(&cfs_b->lock);

	return idle;
}

/* a cfs_rq won't donate quota below this amount */
static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
/* minimum remaining period time to redistribute slack quota */
static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
/* how long we wait to gather additional slack before distributing */
static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;

/* are we near the end of the current quota period? */
static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
{
	struct hrtimer *refresh_timer = &cfs_b->period_timer;
	u64 remaining;

	/* if the call-back is running a quota refresh is already occurring */
	if (hrtimer_callback_running(refresh_timer))
		return 1;

	/* is a quota refresh about to occur? */
	remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
	if (remaining < min_expire)
		return 1;

	return 0;
}

static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
{
	u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;

	/* if there's a quota refresh soon don't bother with slack */
	if (runtime_refresh_within(cfs_b, min_left))
		return;

	start_bandwidth_timer(&cfs_b->slack_timer,
				ns_to_ktime(cfs_bandwidth_slack_period));
}

/* we know any runtime found here is valid as update_curr() precedes return */
static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
	s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;

	if (slack_runtime <= 0)
		return;

	raw_spin_lock(&cfs_b->lock);
	if (cfs_b->quota != RUNTIME_INF &&
	    cfs_rq->runtime_expires == cfs_b->runtime_expires) {
		cfs_b->runtime += slack_runtime;

		/* we are under rq->lock, defer unthrottling using a timer */
		if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
		    !list_empty(&cfs_b->throttled_cfs_rq))
			start_cfs_slack_bandwidth(cfs_b);
	}
	raw_spin_unlock(&cfs_b->lock);

	/* even if it's not valid for return we don't want to try again */
	cfs_rq->runtime_remaining -= slack_runtime;
}

static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
	if (!cfs_bandwidth_used())
		return;

	if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
		return;

	__return_cfs_rq_runtime(cfs_rq);
}

/*
 * This is done with a timer (instead of inline with bandwidth return) since
 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
 */
static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
{
	u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
	u64 expires;

	/* confirm we're still not at a refresh boundary */
	if (runtime_refresh_within(cfs_b, min_bandwidth_expiration))
		return;

	raw_spin_lock(&cfs_b->lock);
	if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
		runtime = cfs_b->runtime;
		cfs_b->runtime = 0;
	}
	expires = cfs_b->runtime_expires;
	raw_spin_unlock(&cfs_b->lock);

	if (!runtime)
		return;

	runtime = distribute_cfs_runtime(cfs_b, runtime, expires);

	raw_spin_lock(&cfs_b->lock);
	if (expires == cfs_b->runtime_expires)
		cfs_b->runtime = runtime;
	raw_spin_unlock(&cfs_b->lock);
}

/*
 * When a group wakes up we want to make sure that its quota is not already
 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
 * runtime as update_curr() throttling cannot trigger until it's on-rq.
 */
static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
{
	if (!cfs_bandwidth_used())
		return;

	/* an active group must be handled by the update_curr()->put() path */
	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
		return;

	/* ensure the group is not already throttled */
	if (cfs_rq_throttled(cfs_rq))
		return;

	/* update runtime allocation */
	account_cfs_rq_runtime(cfs_rq, 0);
	if (cfs_rq->runtime_remaining <= 0)
		throttle_cfs_rq(cfs_rq);
}

/* conditionally throttle active cfs_rq's from put_prev_entity() */
static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
	if (!cfs_bandwidth_used())
		return;

	if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
		return;

	/*
	 * it's possible for a throttled entity to be forced into a running
	 * state (e.g. set_curr_task), in this case we're finished.
	 */
	if (cfs_rq_throttled(cfs_rq))
		return;

	throttle_cfs_rq(cfs_rq);
}

static inline u64 default_cfs_period(void);
static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun);
static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b);

static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
{
	struct cfs_bandwidth *cfs_b =
		container_of(timer, struct cfs_bandwidth, slack_timer);
	do_sched_cfs_slack_timer(cfs_b);

	return HRTIMER_NORESTART;
}

static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
{
	struct cfs_bandwidth *cfs_b =
		container_of(timer, struct cfs_bandwidth, period_timer);
	ktime_t now;
	int overrun;
	int idle = 0;

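	/*
	 * Catch up on any periods missed since the timer last ran; each
	 * overrun refreshes quota via do_sched_cfs_period_timer().
	 */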
	for (;;) {
		now = hrtimer_cb_get_time(timer);
		overrun = hrtimer_forward(timer, now, cfs_b->period);

		if (!overrun)
			break;

		idle = do_sched_cfs_period_timer(cfs_b, overrun);
	}

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
{
	raw_spin_lock_init(&cfs_b->lock);
	cfs_b->runtime = 0;
	cfs_b->quota = RUNTIME_INF;
	cfs_b->period = ns_to_ktime(default_cfs_period());

	INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
	hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	cfs_b->period_timer.function = sched_cfs_period_timer;
	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	cfs_b->slack_timer.function = sched_cfs_slack_timer;
}

static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
	cfs_rq->runtime_enabled = 0;
	INIT_LIST_HEAD(&cfs_rq->throttled_list);
}

/* requires cfs_b->lock, may release to reprogram timer */
void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
{
	/*
	 * The timer may be active because we're trying to set a new bandwidth
	 * period or because we're racing with the tear-down path
	 * (timer_active==0 becomes visible before the hrtimer call-back
	 * terminates).  In either case we ensure that it's re-programmed
	 */
	while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
		raw_spin_unlock(&cfs_b->lock);
		/* ensure cfs_b->lock is available while we wait */
		hrtimer_cancel(&cfs_b->period_timer);

		raw_spin_lock(&cfs_b->lock);
		/* if someone else restarted the timer then we're done */
		if (cfs_b->timer_active)
			return;
	}

	cfs_b->timer_active = 1;
	start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
}

static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
{
	hrtimer_cancel(&cfs_b->period_timer);
	hrtimer_cancel(&cfs_b->slack_timer);
}

2473
static void unthrottle_offline_cfs_rqs(struct rq *rq)
{
	struct cfs_rq *cfs_rq;

	for_each_leaf_cfs_rq(rq, cfs_rq) {
		struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);

		if (!cfs_rq->runtime_enabled)
			continue;

		/*
		 * clock_task is not advancing so we just need to make sure
		 * there's some valid quota amount
		 */
		cfs_rq->runtime_remaining = cfs_b->quota;
		if (cfs_rq_throttled(cfs_rq))
			unthrottle_cfs_rq(cfs_rq);
	}
}

#else /* CONFIG_CFS_BANDWIDTH */
static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
{
	return rq_of(cfs_rq)->clock_task;
}

static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
				     unsigned long delta_exec) {}
2501 2502
static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
2503
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}

static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
{
	return 0;
}

static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
{
	return 0;
}

static inline int throttled_lb_pair(struct task_group *tg,
				    int src_cpu, int dest_cpu)
{
	return 0;
}

void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}

#ifdef CONFIG_FAIR_GROUP_SCHED
static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2525 2526
#endif

static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
{
	return NULL;
}
static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
2532
static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
2533 2534 2535

#endif /* CONFIG_CFS_BANDWIDTH */

/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_SCHED_HRTICK
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	WARN_ON(task_rq(p) != rq);

2548
	if (cfs_rq->nr_running > 1) {
		u64 slice = sched_slice(cfs_rq, se);
		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
		s64 delta = slice - ran;

		if (delta < 0) {
			if (rq->curr == p)
				resched_task(p);
			return;
		}

		/*
		 * Don't schedule slices shorter than 10000ns, that just
		 * doesn't make sense. Rely on vruntime for fairness.
		 */
2563
		if (rq->curr != p)
2564
			delta = max_t(s64, 10000LL, delta);

2566
		hrtick_start(rq, delta);
	}
}

/*
 * called from enqueue/dequeue and updates the hrtick when the
 * current task is from our class and nr_running is low enough
 * to matter.
 */
static void hrtick_update(struct rq *rq)
{
	struct task_struct *curr = rq->curr;

2579
	if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
		return;

	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
		hrtick_start_fair(rq, curr);
}
2585
#else /* !CONFIG_SCHED_HRTICK */
static inline void
hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
}

static inline void hrtick_update(struct rq *rq)
{
}
#endif

/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
2601
static void
2602
enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
2603 2604
{
	struct cfs_rq *cfs_rq;
2605
	struct sched_entity *se = &p->se;
2606 2607

	for_each_sched_entity(se) {
2608
		if (se->on_rq)
2609 2610
			break;
		cfs_rq = cfs_rq_of(se);
2611
		enqueue_entity(cfs_rq, se, flags);

		/*
		 * end evaluation on encountering a throttled cfs_rq
		 *
		 * note: in the case of encountering a throttled cfs_rq we will
		 * post the final h_nr_running increment below.
		*/
		if (cfs_rq_throttled(cfs_rq))
			break;
2621
		cfs_rq->h_nr_running++;
2622

2623
		flags = ENQUEUE_WAKEUP;
2624
	}

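	/*
	 * Continue up the hierarchy from where the loop above stopped,
	 * updating h_nr_running and the group load/shares of the ancestors.
	 */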
	for_each_sched_entity(se) {
2627
		cfs_rq = cfs_rq_of(se);
2628
		cfs_rq->h_nr_running++;

2630 2631 2632
		if (cfs_rq_throttled(cfs_rq))
			break;

2633
		update_cfs_load(cfs_rq, 0);
2634
		update_cfs_shares(cfs_rq);
2635
		update_entity_load_avg(se, 1);
	}

2638 2639
	if (!se) {
		update_rq_runnable_avg(rq, rq->nr_running);
2640
		inc_nr_running(rq);
2641
	}
2642
	hrtick_update(rq);
2643 2644
}

2645 2646
static void set_next_buddy(struct sched_entity *se);

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
2652
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
2653 2654
{
	struct cfs_rq *cfs_rq;
2655
	struct sched_entity *se = &p->se;
2656
	int task_sleep = flags & DEQUEUE_SLEEP;
2657 2658 2659

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
2660
		dequeue_entity(cfs_rq, se, flags);

		/*
		 * end evaluation on encountering a throttled cfs_rq
		 *
		 * note: in the case of encountering a throttled cfs_rq we will
		 * post the final h_nr_running decrement below.
		*/
		if (cfs_rq_throttled(cfs_rq))
			break;
2670
		cfs_rq->h_nr_running--;

2672
		/* Don't dequeue parent if it has other entities besides us */
		if (cfs_rq->load.weight) {
			/*
			 * Bias pick_next to pick a task from this cfs_rq, as
			 * p is sleeping when it is within its sched_slice.
			 */
			if (task_sleep && parent_entity(se))
				set_next_buddy(parent_entity(se));
2680 2681 2682

			/* avoid re-evaluating load for this entity */
			se = parent_entity(se);
2683
			break;
2684
		}
2685
		flags |= DEQUEUE_SLEEP;
2686
	}

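	/*
	 * As on the enqueue side: walk the rest of the hierarchy, updating
	 * h_nr_running and the group load/shares of the remaining ancestors.
	 */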
	for_each_sched_entity(se) {
2689
		cfs_rq = cfs_rq_of(se);
2690
		cfs_rq->h_nr_running--;

2692 2693 2694
		if (cfs_rq_throttled(cfs_rq))
			break;

2695
		update_cfs_load(cfs_rq, 0);
2696
		update_cfs_shares(cfs_rq);
2697
		update_entity_load_avg(se, 1);
	}

2700
	if (!se) {
2701
		dec_nr_running(rq);
2702 2703
		update_rq_runnable_avg(rq, 1);
	}
2704
	hrtick_update(rq);
2705 2706
}

2707
#ifdef CONFIG_SMP
/* Used instead of source_load when we know the type == 0 */
static unsigned long weighted_cpuload(const int cpu)
{
	return cpu_rq(cpu)->load.weight;
}

/*
 * Return a low guess at the load of a migration-source cpu weighted
 * according to the scheduling class and "nice" value.
 *
 * We want to under-estimate the load of migration sources, to
 * balance conservatively.
 */
static unsigned long source_load(int cpu, int type)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long total = weighted_cpuload(cpu);

	if (type == 0 || !sched_feat(LB_BIAS))
		return total;

	return min(rq->cpu_load[type-1], total);
}

/*
 * Return a high guess at the load of a migration-target cpu weighted
 * according to the scheduling class and "nice" value.
 */
static unsigned long target_load(int cpu, int type)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long total = weighted_cpuload(cpu);

	if (type == 0 || !sched_feat(LB_BIAS))
		return total;

	return max(rq->cpu_load[type-1], total);
}

static unsigned long power_of(int cpu)
{
	return cpu_rq(cpu)->cpu_power;
}

static unsigned long cpu_avg_load_per_task(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long nr_running = ACCESS_ONCE(rq->nr_running);

	if (nr_running)
		return rq->load.weight / nr_running;

	return 0;
}


static void task_waking_fair(struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);
	u64 min_vruntime;

#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;

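	/*
	 * On 32-bit a 64-bit read can tear, so re-read until min_vruntime
	 * and min_vruntime_copy agree.
	 */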
	do {
		min_vruntime_copy = cfs_rq->min_vruntime_copy;
		smp_rmb();
		min_vruntime = cfs_rq->min_vruntime;
	} while (min_vruntime != min_vruntime_copy);
#else
	min_vruntime = cfs_rq->min_vruntime;
#endif

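	/*
	 * Strip off the old cfs_rq's min_vruntime so the entity's vruntime
	 * is relative; it gets re-based against the destination cfs_rq when
	 * the task is enqueued there.
	 */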
	se->vruntime -= min_vruntime;
}

2785
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * effective_load() calculates the load change as seen from the root_task_group
 *
 * Adding load to a group doesn't make a group heavier, but can cause movement
 * of group shares between cpus. Assuming the shares were perfectly aligned one
 * can calculate the shift in shares.
 *
 * Calculate the effective load difference if @wl is added (subtracted) to @tg
 * on this @cpu and results in a total addition (subtraction) of @wg to the
 * total group weight.
 *
 * Given a runqueue weight distribution (rw_i) we can compute a shares
 * distribution (s_i) using:
 *
 *   s_i = rw_i / \Sum rw_j						(1)
 *
 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
 * shares distribution (s_i):
 *
 *   rw_i = {   2,   4,   1,   0 }
 *   s_i  = { 2/7, 4/7, 1/7,   0 }
 *
 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
 * task used to run on and the CPU the waker is running on), we need to
 * compute the effect of waking a task on either CPU and, in case of a sync
 * wakeup, compute the effect of the current task going to sleep.
 *
 * So for a change of @wl to the local @cpu with an overall group weight change
 * of @wg we can compute the new shares distribution (s'_i) using:
 *
 *   s'_i = (rw_i + @wl) / (@wg + \Sum rw_j)				(2)
 *
 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
 * differences in waking a task to CPU 0. The additional task changes the
 * weight and shares distributions like:
 *
 *   rw'_i = {   3,   4,   1,   0 }
 *   s'_i  = { 3/8, 4/8, 1/8,   0 }
 *
 * We can then compute the difference in effective weight by using:
 *
 *   dw_i = S * (s'_i - s_i)						(3)
 *
 * Where 'S' is the group weight as seen by its parent.
 *
 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
 * 4/7) times the weight of the group.
 */
static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
2837
{
	struct sched_entity *se = tg->se[cpu];
2839

2840
	if (!tg->parent)	/* the trivial, non-cgroup case */
2841 2842
		return wl;

	for_each_sched_entity(se) {
2844
		long w, W;

2846
		tg = se->my_q->tg;
2847

		/*
		 * W = @wg + \Sum rw_j
		 */
		W = wg + calc_tg_weight(tg, se->my_q);

		/*
		 * w = rw_i + @wl
		 */
		w = se->my_q->load.weight + wl;
2857

		/*
		 * wl = S * s'_i; see (2)
		 */
		if (W > 0 && w < W)
			wl = (w * tg->shares) / W;
2863 2864
		else
			wl = tg->shares;
2865

		/*
		 * Per the above, wl is the new se->load.weight value; since
		 * those are clipped to [MIN_SHARES, ...) do so now. See
		 * calc_cfs_shares().
		 */
2871 2872
		if (wl < MIN_SHARES)
			wl = MIN_SHARES;

		/*
		 * wl = dw_i = S * (s'_i - s_i); see (3)
		 */
2877
		wl -= se->load.weight;

		/*
		 * Recursively apply this logic to all parent groups to compute
		 * the final effective load change on the root group. Since
		 * only the @tg group gets extra weight, all parent groups can
		 * only redistribute existing shares. @wl is the shift in shares
		 * resulting from this level per the above.
		 */
		wg = 0;
	}
2888

	return wl;
2890 2891
}
#else

2893 2894
static inline unsigned long effective_load(struct task_group *tg, int cpu,
		unsigned long wl, unsigned long wg)
{
2896
	return wl;
2897
}

2899 2900
#endif

2901
static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
2902
{
2903
	s64 this_load, load;
2904
	int idx, this_cpu, prev_cpu;
2905
	unsigned long tl_per_task;
2906
	struct task_group *tg;
2907
	unsigned long weight;
2908
	int balanced;
2909

	idx	  = sd->wake_idx;
	this_cpu  = smp_processor_id();
	prev_cpu  = task_cpu(p);
	load	  = source_load(prev_cpu, idx);
	this_load = target_load(this_cpu, idx);
2915

	/*
	 * If sync wakeup then subtract the (maximum possible)
	 * effect of the currently running task from the load
	 * of the current CPU:
	 */
	if (sync) {
		tg = task_group(current);
		weight = current->se.load.weight;

2925
		this_load += effective_load(tg, this_cpu, -weight, -weight);
2926 2927
		load += effective_load(tg, prev_cpu, 0, -weight);
	}
2928

2929 2930
	tg = task_group(p);
	weight = p->se.load.weight;
2931

2932 2933
	/*
	 * In low-load situations, where prev_cpu is idle and this_cpu is idle
2934 2935 2936
	 * due to the sync cause above having dropped this_load to 0, we'll
	 * always have an imbalance, but there's really nothing you can do
	 * about that, so that's good too.
	 *
	 * Otherwise check if either cpus are near enough in load to allow this
	 * task to be woken on this_cpu.
	 */
2941 2942
	if (this_load > 0) {
		s64 this_eff_load, prev_eff_load;

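		/*
		 * Compare load/power cross-multiplied: each side is scaled by
		 * the other cpu's power, and the previous cpu gets a bonus of
		 * half the domain's imbalance_pct, so the wakeup is only
		 * pulled to this_cpu if that is still the cheaper option.
		 */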
		this_eff_load = 100;
		this_eff_load *= power_of(prev_cpu);
		this_eff_load *= this_load +
			effective_load(tg, this_cpu, weight, weight);

		prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
		prev_eff_load *= power_of(this_cpu);
		prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);

		balanced = this_eff_load <= prev_eff_load;
	} else
		balanced = true;

2957
	/*
	 * If the currently running task will sleep within
	 * a reasonable amount of time then attract this newly
	 * woken task:
2961
	 */
2962 2963
	if (sync && balanced)
		return 1;
2964

2965
	schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
2966 2967
	tl_per_task = cpu_avg_load_per_task(this_cpu);

2968 2969 2970
	if (balanced ||
	    (this_load <= load &&
	     this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
		/*
		 * This domain has SD_WAKE_AFFINE and
		 * p is cache cold in this domain, and
		 * there is no bad imbalance.
		 */
2976
		schedstat_inc(sd, ttwu_move_affine);
2977
		schedstat_inc(p, se.statistics.nr_wakeups_affine);

		return 1;
	}
	return 0;
}

/*
 * find_idlest_group finds and returns the least busy CPU group within the
 * domain.
 */
static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p,
2990
		  int this_cpu, int load_idx)
2991
{
2992
	struct sched_group *idlest = NULL, *group = sd->groups;
2993 2994
	unsigned long min_load = ULONG_MAX, this_load = 0;
	int imbalance = 100 + (sd->imbalance_pct-100)/2;
2995

	do {
		unsigned long load, avg_load;
		int local_group;
		int i;
3000

3001 3002
		/* Skip over this group if it has no CPUs allowed */
		if (!cpumask_intersects(sched_group_cpus(group),
3003
					tsk_cpus_allowed(p)))
			continue;

		local_group = cpumask_test_cpu(this_cpu,
					       sched_group_cpus(group));

		/* Tally up the load of all CPUs in the group */
		avg_load = 0;

		for_each_cpu(i, sched_group_cpus(group)) {
			/* Bias balancing toward cpus of our domain */
			if (local_group)
				load = source_load(i, load_idx);
			else
				load = target_load(i, load_idx);

			avg_load += load;
		}

		/* Adjust by relative CPU power of the group */
3023
		avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;

		if (local_group) {
			this_load = avg_load;
		} else if (avg_load < min_load) {
			min_load = avg_load;
			idlest = group;
		}
	} while (group = group->next, group != sd->groups);

	if (!idlest || 100*this_load < imbalance*min_load)
		return NULL;
	return idlest;
}

/*
 * find_idlest_cpu - find the idlest cpu among the cpus in group.
 */
static int
find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
{
	unsigned long load, min_load = ULONG_MAX;
	int idlest = -1;
	int i;

	/* Traverse only the allowed CPUs */
3049
	for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
		load = weighted_cpuload(i);

		if (load < min_load || (load == min_load && i == this_cpu)) {
			min_load = load;
			idlest = i;
3055 3056 3057
		}
	}

3058 3059
	return idlest;
}
3060

3061 3062 3063
/*
 * Try and locate an idle CPU in the sched_domain.
 */
3064
static int select_idle_sibling(struct task_struct *p, int target)
3065 3066 3067
{
	int cpu = smp_processor_id();
	int prev_cpu = task_cpu(p);
3068
	struct sched_domain *sd;
3069 3070
	struct sched_group *sg;
	int i;
3071 3072

	/*
3073 3074
	 * If the task is going to be woken-up on this cpu and if it is
	 * already idle, then it is the right target.
3075
	 */
	if (target == cpu && idle_cpu(cpu))
		return cpu;

	/*
	 * If the task is going to be woken-up on the cpu where it previously
	 * ran and if it is currently idle, then it is the right target.
	 */
	if (target == prev_cpu && idle_cpu(prev_cpu))
		return prev_cpu;
3085 3086

	/*
	 * Otherwise, iterate the domains and find an eligible idle cpu.
	 */
3089
	sd = rcu_dereference(per_cpu(sd_llc, target));
3090
	for_each_lower_domain(sd) {
		sg = sd->groups;
		do {
			if (!cpumask_intersects(sched_group_cpus(sg),
						tsk_cpus_allowed(p)))
				goto next;

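			/* use this group only if every cpu in it is idle */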
			for_each_cpu(i, sched_group_cpus(sg)) {
				if (!idle_cpu(i))
					goto next;
			}
3101

			target = cpumask_first_and(sched_group_cpus(sg),
					tsk_cpus_allowed(p));
			goto done;
next:
			sg = sg->next;
		} while (sg != sd->groups);
	}
done:
3110 3111 3112
	return target;
}

/*
 * select_task_rq_fair: balance the current task (running on cpu) in domains
 * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
 * SD_BALANCE_FORK and SD_BALANCE_EXEC.
 *
 * Balance, i.e. select the least loaded group.
 *
 * Returns the target CPU number, or the same CPU if no balancing is needed.
 *
 * preempt must be disabled.
 */
static int
3125
select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
3126
{
3127
	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
3128 3129 3130
	int cpu = smp_processor_id();
	int prev_cpu = task_cpu(p);
	int new_cpu = cpu;
3131
	int want_affine = 0;
3132
	int sync = wake_flags & WF_SYNC;
3133

3134
	if (p->nr_cpus_allowed == 1)
3135 3136
		return prev_cpu;

3137
	if (sd_flag & SD_BALANCE_WAKE) {
3138
		if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
3139 3140 3141
			want_affine = 1;
		new_cpu = prev_cpu;
	}
3142

3143
	rcu_read_lock();
3144
	for_each_domain(cpu, tmp) {
3145 3146 3147
		if (!(tmp->flags & SD_LOAD_BALANCE))
			continue;

3148
		/*
3149 3150
		 * If both cpu and prev_cpu are part of this domain,
		 * cpu is a valid SD_WAKE_AFFINE target.
3151
		 */
3152 3153 3154
		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
			affine_sd = tmp;
3155
			break;
3156
		}
3157

3158
		if (tmp->flags & sd_flag)
3159 3160 3161
			sd = tmp;
	}

3162
	if (affine_sd) {
3163
		if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
			prev_cpu = cpu;

		new_cpu = select_idle_sibling(p, prev_cpu);
		goto unlock;
3168
	}

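	/*
	 * Slow path: walk down the domain hierarchy, at each level picking
	 * the idlest group and then the idlest allowed cpu within it.
	 */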
	while (sd) {
3171
		int load_idx = sd->forkexec_idx;
3172
		struct sched_group *group;
3173
		int weight;
3174

3175
		if (!(sd->flags & sd_flag)) {
3176 3177 3178
			sd = sd->child;
			continue;
		}
3179

3180 3181
		if (sd_flag & SD_BALANCE_WAKE)
			load_idx = sd->wake_idx;
3182

3183
		group = find_idlest_group(sd, p, cpu, load_idx);
		if (!group) {
			sd = sd->child;
			continue;
		}

3189
		new_cpu = find_idlest_cpu(group, p, cpu);
		if (new_cpu == -1 || new_cpu == cpu) {
			/* Now try balancing at a lower domain level of cpu */
			sd = sd->child;
			continue;
3194
		}
3195 3196 3197

		/* Now try balancing at a lower domain level of new_cpu */
		cpu = new_cpu;
3198
		weight = sd->span_weight;
3199 3200
		sd = NULL;
		for_each_domain(cpu, tmp) {
3201
			if (weight <= tmp->span_weight)
3202
				break;
3203
			if (tmp->flags & sd_flag)
3204 3205 3206
				sd = tmp;
		}
		/* while loop will break here if sd == NULL */
3207
	}
3208 3209
unlock:
	rcu_read_unlock();
3210

3211
	return new_cpu;
3212
}

/*
 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
 * cfs_rq_of(p) references at time of call are still valid and identify the
 * previous cpu.  However, the caller only guarantees p->pi_lock is held; no
 * other assumptions, including the state of rq->lock, should be made.
 */
static void
migrate_task_rq_fair(struct task_struct *p, int next_cpu)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	/*
	 * Load tracking: accumulate removed load so that it can be processed
	 * when we next update owning cfs_rq under rq->lock.  Tasks contribute
	 * to blocked load iff they have a positive decay-count.  It can never
	 * be negative here since on-rq tasks have decay-count == 0.
	 */
	if (se->avg.decay_count) {
		se->avg.decay_count = -__synchronize_entity_decay(se);
		atomic64_add(se->avg.load_avg_contrib, &cfs_rq->removed_load);
	}
3236
}
3237 3238
#endif /* CONFIG_SMP */

static unsigned long
wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
{
	unsigned long gran = sysctl_sched_wakeup_granularity;

	/*
	 * Since it's curr that is running now, convert the gran from
	 * real-time to virtual-time in curr's units.
	 *
	 * By using 'se' instead of 'curr' we penalize light tasks, so
	 * they get preempted easier. That is, if 'se' < 'curr' then
	 * the resulting gran will be larger, therefore penalizing the
	 * lighter, if otoh 'se' > 'curr' then the resulting gran will
	 * be smaller, again penalizing the lighter task.
	 *
	 * This is especially important for buddies when the leftmost
	 * task is higher priority than the buddy.
3256
	 */
3257
	return calc_delta_fair(gran, se);
3258 3259
}

/*
 * Should 'se' preempt 'curr'.
 *
 *             |s1
 *        |s2
 *   |s3
 *         g
 *      |<--->|c
 *
 *  w(c, s1) = -1
 *  w(c, s2) =  0
 *  w(c, s3) =  1
 *
 */
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
{
	s64 gran, vdiff = curr->vruntime - se->vruntime;

	if (vdiff <= 0)
		return -1;

	gran = wakeup_gran(curr, se);
	if (vdiff > gran)
		return 1;

	return 0;
}

3289 3290
static void set_last_buddy(struct sched_entity *se)
{
	if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
		return;

	for_each_sched_entity(se)
		cfs_rq_of(se)->last = se;
}

static void set_next_buddy(struct sched_entity *se)
{
	if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
		return;

	for_each_sched_entity(se)
		cfs_rq_of(se)->next = se;
3305 3306
}

3307 3308
static void set_skip_buddy(struct sched_entity *se)
{
3309 3310
	for_each_sched_entity(se)
		cfs_rq_of(se)->skip = se;
3311 3312
}

3313 3314 3315
/*
 * Preempt the current task with a newly woken task if needed:
 */
3316
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
3317 3318
{
	struct task_struct *curr = rq->curr;
3319
	struct sched_entity *se = &curr->se, *pse = &p->se;
3320
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
3321
	int scale = cfs_rq->nr_running >= sched_nr_latency;
3322
	int next_buddy_marked = 0;
3323

	if (unlikely(se == pse))
		return;

3327
	/*
3328
	 * This is possible from callers such as move_task(), in which we
	 * unconditionally check_preempt_curr() after an enqueue (which may have
	 * lead to a throttle).  This both saves work and prevents false
	 * next-buddy nomination below.
	 */
	if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
		return;

3336
	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
		set_next_buddy(pse);
3338 3339
		next_buddy_marked = 1;
	}

3341 3342 3343
	/*
	 * We can come here with TIF_NEED_RESCHED already set from new task
	 * wake up path.
	 *
	 * Note: this also catches the edge-case of curr being in a throttled
	 * group (e.g. via set_curr_task), since update_curr() (in the
	 * enqueue of curr) will have resulted in resched being set.  This
	 * prevents us from potentially nominating it as a false LAST_BUDDY
	 * below.
	 */
	if (test_tsk_need_resched(curr))
		return;

	/* Idle tasks are by definition preempted by non-idle tasks. */
	if (unlikely(curr->policy == SCHED_IDLE) &&
	    likely(p->policy != SCHED_IDLE))
		goto preempt;

3359
	/*
3360 3361
	 * Batch and idle tasks do not preempt non-idle tasks (their preemption
	 * is driven by the tick):
3362
	 */
3363
	if (unlikely(p->policy != SCHED_NORMAL))
3364
		return;
3365

3366
	find_matching_se(&se, &pse);
3367
	update_curr(cfs_rq_of(se));
3368
	BUG_ON(!pse);
	if (wakeup_preempt_entity(se, pse) == 1) {
		/*
		 * Bias pick_next to pick the sched entity that is
		 * triggering this preemption.
		 */
		if (!next_buddy_marked)
			set_next_buddy(pse);
3376
		goto preempt;
3377
	}
3378

3379
	return;
3380

preempt:
	resched_task(curr);
	/*
	 * Only set the backward buddy when the current task is still
	 * on the rq. This can happen when a wakeup gets interleaved
	 * with schedule on the ->pre_schedule() or idle_balance()
	 * point, either of which can drop the rq lock.
	 *
	 * Also, during early boot the idle thread is in the fair class,
	 * for obvious reasons it's a bad idea to schedule back to it.
	 */
	if (unlikely(!se->on_rq || curr == rq->idle))
		return;

	if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
		set_last_buddy(se);
3397 3398
}

3399
static struct task_struct *pick_next_task_fair(struct rq *rq)
3400
{
	struct task_struct *p;
3402 3403 3404
	struct cfs_rq *cfs_rq = &rq->cfs;
	struct sched_entity *se;

3405
	if (!cfs_rq->nr_running)
		return NULL;

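	/*
	 * Descend the group hierarchy, picking an entity at each level,
	 * until we reach a cfs_rq with no children (i.e. an actual task).
	 */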
	do {
		se = pick_next_entity(cfs_rq);
		set_next_entity(cfs_rq, se);
		cfs_rq = group_cfs_rq(se);
	} while (cfs_rq);

	p = task_of(se);
3415 3416
	if (hrtick_enabled(rq))
		hrtick_start_fair(rq, p);

	return p;
}

/*
 * Account for a descheduled task:
 */
3424
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
	struct sched_entity *se = &prev->se;
	struct cfs_rq *cfs_rq;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
3431
		put_prev_entity(cfs_rq, se);
3432 3433 3434
	}
}

/*
 * sched_yield() is very simple
 *
 * The magic of dealing with the ->skip buddy is in pick_next_entity.
 */
static void yield_task_fair(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	struct sched_entity *se = &curr->se;

	/*
	 * Are we the only task in the tree?
	 */
	if (unlikely(rq->nr_running == 1))
		return;

	clear_buddies(cfs_rq, se);

	if (curr->policy != SCHED_BATCH) {
		update_rq_clock(rq);
		/*
		 * Update run-time statistics of the 'current'.
		 */
		update_curr(cfs_rq);
		/*
		 * Tell update_rq_clock() that we've just updated,
		 * so we don't do microscopic update in schedule()
		 * and double the fastpath cost.
		 */
		 rq->skip_clock_update = 1;
	}

	set_skip_buddy(se);
}

static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
{
	struct sched_entity *se = &p->se;

3475 3476
	/* throttled hierarchies are not runnable */
	if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
		return false;

	/* Tell the scheduler that we'd really like pse to run next. */
	set_next_buddy(se);

	yield_task_fair(rq);

	return true;
}

3487
#ifdef CONFIG_SMP
/**************************************************
 * Fair scheduling class load-balancing methods:
 */

3492 3493
static unsigned long __read_mostly max_load_balance_interval = HZ/10;

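/*
 * lb_env->flags bits: every candidate task was pinned to the source cpu,
 * the scan should take a break, or some tasks were pinned here but could
 * still run elsewhere in the destination group (see can_migrate_task()).
 */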
#define LBF_ALL_PINNED	0x01
#define LBF_NEED_BREAK	0x02
#define LBF_SOME_PINNED 0x04

struct lb_env {
	struct sched_domain	*sd;

	struct rq		*src_rq;
3502
	int			src_cpu;

	int			dst_cpu;
	struct rq		*dst_rq;

3507 3508
	struct cpumask		*dst_grpmask;
	int			new_dst_cpu;
3509
	enum cpu_idle_type	idle;
3510
	long			imbalance;
3511 3512 3513
	/* The set of CPUs under consideration for load-balancing */
	struct cpumask		*cpus;

3514
	unsigned int		flags;
3515 3516 3517 3518

	unsigned int		loop;
	unsigned int		loop_break;
	unsigned int		loop_max;
3519 3520
};

3521
/*
3522
 * move_task - move a task from one runqueue to another runqueue.
3523 3524
 * Both runqueues must be locked.
 */
3525
static void move_task(struct task_struct *p, struct lb_env *env)
3526
{
	deactivate_task(env->src_rq, p, 0);
	set_task_cpu(p, env->dst_cpu);
	activate_task(env->dst_rq, p, 0);
	check_preempt_curr(env->dst_rq, p, 0);
3531 3532
}

/*
 * Is this task likely cache-hot:
 */
static int
task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
{
	s64 delta;

	if (p->sched_class != &fair_sched_class)
		return 0;

	if (unlikely(p->policy == SCHED_IDLE))
		return 0;

	/*
	 * Buddy candidates are cache hot:
	 */
	if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
			(&p->se == cfs_rq_of(&p->se)->next ||
			 &p->se == cfs_rq_of(&p->se)->last))
		return 1;

	if (sysctl_sched_migration_cost == -1)
		return 1;
	if (sysctl_sched_migration_cost == 0)
		return 0;

	delta = now - p->se.exec_start;

	return delta < (s64)sysctl_sched_migration_cost;
}

/*
 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
 */
static
3569
int can_migrate_task(struct task_struct *p, struct lb_env *env)
{
	int tsk_cache_hot = 0;
	/*
	 * We do not migrate tasks that are:
	 * 1) running (obviously), or
	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
	 * 3) are cache-hot on their current CPU.
	 */
3578
	if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
3579 3580
		int new_dst_cpu;

3581
		schedstat_inc(p, se.statistics.nr_failed_migrations_affine);

		/*
		 * Remember if this task can be migrated to any other cpu in
		 * our sched_group. We may want to revisit it if we couldn't
		 * meet load balance goals by pulling other tasks on src_cpu.
		 *
		 * Also avoid computing new_dst_cpu if we have already computed
		 * one in current iteration.
		 */
		if (!env->dst_grpmask || (env->flags & LBF_SOME_PINNED))
			return 0;

		new_dst_cpu = cpumask_first_and(env->dst_grpmask,
						tsk_cpus_allowed(p));
		if (new_dst_cpu < nr_cpu_ids) {
			env->flags |= LBF_SOME_PINNED;
			env->new_dst_cpu = new_dst_cpu;
		}
3600 3601
		return 0;
	}

	/* Record that we found at least one task that could run on dst_cpu */
	env->flags &= ~LBF_ALL_PINNED;
3605

3606
	if (task_running(env->src_rq, p)) {
3607
		schedstat_inc(p, se.statistics.nr_failed_migrations_running);
		return 0;
	}

	/*
	 * Aggressive migration if:
	 * 1) task is cache cold, or
	 * 2) too many balance attempts have failed.
	 */

3617
	tsk_cache_hot = task_hot(p, env->src_rq->clock_task, env->sd);
3618
	if (!tsk_cache_hot ||
3619
		env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
3620 3621
#ifdef CONFIG_SCHEDSTATS
		if (tsk_cache_hot) {
3622
			schedstat_inc(env->sd, lb_hot_gained[env->idle]);
3623
			schedstat_inc(p, se.statistics.nr_forced_migrations);
		}
#endif
		return 1;
	}

	if (tsk_cache_hot) {
3630
		schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
3631 3632 3633 3634 3635
		return 0;
	}
	return 1;
}

/*
 * move_one_task tries to move exactly one task from busiest to this_rq, as
 * part of active balancing operations within "domain".
 * Returns 1 if successful and 0 otherwise.
 *
 * Called with both runqueues locked.
 */
3643
static int move_one_task(struct lb_env *env)
3644 3645 3646
{
	struct task_struct *p, *n;

3647 3648 3649
	list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
		if (throttled_lb_pair(task_group(p), env->src_rq->cpu, env->dst_cpu))
			continue;
3650

3651 3652
		if (!can_migrate_task(p, env))
			continue;
3653

		move_task(p, env);
		/*
		 * Right now, this is only the second place move_task()
		 * is called, so we can safely collect move_task()
		 * stats here rather than inside move_task().
		 */
		schedstat_inc(env->sd, lb_gained[env->idle]);
		return 1;
3662 3663 3664 3665
	}
	return 0;
}

3666 3667
static unsigned long task_h_load(struct task_struct *p);

3668 3669
static const unsigned int sched_nr_migrate_break = 32;

3670
/*
3671
 * move_tasks tries to move up to imbalance weighted load from busiest to
3672 3673 3674 3675 3676 3677
 * this_rq, as part of a balancing operation within domain "sd".
 * Returns 1 if successful and 0 otherwise.
 *
 * Called with both runqueues locked.
 */
static int move_tasks(struct lb_env *env)
3678
{
3679 3680
	struct list_head *tasks = &env->src_rq->cfs_tasks;
	struct task_struct *p;
3681 3682
	unsigned long load;
	int pulled = 0;
3683

3684
	if (env->imbalance <= 0)
3685
		return 0;
3686

3687 3688
	while (!list_empty(tasks)) {
		p = list_first_entry(tasks, struct task_struct, se.group_node);
3689

3690 3691
		env->loop++;
		/* We've more or less seen every task there is, call it quits */
3692
		if (env->loop > env->loop_max)
3693
			break;
3694 3695

		/* take a breather every nr_migrate tasks */
3696
		if (env->loop > env->loop_break) {
3697
			env->loop_break += sched_nr_migrate_break;
3698
			env->flags |= LBF_NEED_BREAK;
3699
			break;
3700
		}
3701

3702
		if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
3703 3704 3705
			goto next;

		load = task_h_load(p);
3706

3707
		if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
3708 3709
			goto next;

3710
		if ((load / 2) > env->imbalance)
3711
			goto next;
3712

3713 3714
		if (!can_migrate_task(p, env))
			goto next;
3715

3716
		move_task(p, env);
3717
		pulled++;
3718
		env->imbalance -= load;
3719 3720

#ifdef CONFIG_PREEMPT
3721 3722 3723 3724 3725
		/*
		 * NEWIDLE balancing is a source of latency, so preemptible
		 * kernels will stop after the first task is pulled to minimize
		 * the critical section.
		 */
3726
		if (env->idle == CPU_NEWLY_IDLE)
3727
			break;
3728 3729
#endif

3730 3731 3732 3733
		/*
		 * We only want to steal up to the prescribed amount of
		 * weighted load.
		 */
3734
		if (env->imbalance <= 0)
3735
			break;
3736 3737 3738

		continue;
next:
3739
		list_move_tail(&p->se.group_node, tasks);
3740
	}
3741

3742
	/*
3743 3744 3745
	 * Right now, this is one of only two places move_task() is called,
	 * so we can safely collect move_task() stats here rather than
	 * inside move_task().
3746
	 */
3747
	schedstat_add(env->sd, lb_gained[env->idle], pulled);
3748

3749
	return pulled;
3750 3751
}

P
Peter Zijlstra 已提交
3752
#ifdef CONFIG_FAIR_GROUP_SCHED
3753 3754 3755
/*
 * update tg->load_weight by folding this cpu's load_avg
 */
3756
static int update_shares_cpu(struct task_group *tg, int cpu)
3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770
{
	struct cfs_rq *cfs_rq;
	unsigned long flags;
	struct rq *rq;

	if (!tg->se[cpu])
		return 0;

	rq = cpu_rq(cpu);
	cfs_rq = tg->cfs_rq[cpu];

	raw_spin_lock_irqsave(&rq->lock, flags);

	update_rq_clock(rq);
3771
	update_cfs_load(cfs_rq, 1);
3772
	update_cfs_rq_blocked_load(cfs_rq, 1);
3773 3774 3775 3776 3777

	/*
	 * We need to update shares after updating tg->load_weight in
	 * order to adjust the weight of groups with long running tasks.
	 */
3778
	update_cfs_shares(cfs_rq);
3779 3780 3781 3782 3783 3784 3785 3786 3787 3788 3789 3790

	raw_spin_unlock_irqrestore(&rq->lock, flags);

	return 0;
}

static void update_shares(int cpu)
{
	struct cfs_rq *cfs_rq;
	struct rq *rq = cpu_rq(cpu);

	rcu_read_lock();
3791 3792 3793 3794
	/*
	 * Iterates the task_group tree in a bottom up fashion, see
	 * list_add_leaf_cfs_rq() for details.
	 */
3795 3796 3797 3798 3799
	for_each_leaf_cfs_rq(rq, cfs_rq) {
		/* throttled entities do not contribute to load */
		if (throttled_hierarchy(cfs_rq))
			continue;

3800
		update_shares_cpu(cfs_rq->tg, cpu);
3801
	}
3802 3803 3804
	rcu_read_unlock();
}

3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829
/*
 * Compute the cpu's hierarchical load factor for each task group.
 * This needs to be done in a top-down fashion because the load of a child
 * group is a fraction of its parents load.
 */
static int tg_load_down(struct task_group *tg, void *data)
{
	unsigned long load;
	long cpu = (long)data;

	if (!tg->parent) {
		load = cpu_rq(cpu)->load.weight;
	} else {
		load = tg->parent->cfs_rq[cpu]->h_load;
		load *= tg->se[cpu]->load.weight;
		load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
	}

	tg->cfs_rq[cpu]->h_load = load;

	return 0;
}

static void update_h_load(long cpu)
{
3830 3831 3832 3833 3834 3835 3836 3837
	struct rq *rq = cpu_rq(cpu);
	unsigned long now = jiffies;

	if (rq->h_load_throttle == now)
		return;

	rq->h_load_throttle = now;

3838
	rcu_read_lock();
3839
	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
3840
	rcu_read_unlock();
3841 3842
}

3843
static unsigned long task_h_load(struct task_struct *p)
P
Peter Zijlstra 已提交
3844
{
3845 3846
	struct cfs_rq *cfs_rq = task_cfs_rq(p);
	unsigned long load;
P
Peter Zijlstra 已提交
3847

3848 3849
	load = p->se.load.weight;
	load = div_u64(load * cfs_rq->h_load, cfs_rq->load.weight + 1);
P
Peter Zijlstra 已提交
3850

3851
	return load;
P
Peter Zijlstra 已提交
3852 3853
}
#else
3854 3855 3856 3857
static inline void update_shares(int cpu)
{
}

3858
static inline void update_h_load(long cpu)
P
Peter Zijlstra 已提交
3859 3860 3861
{
}

3862
static unsigned long task_h_load(struct task_struct *p)
3863
{
3864
	return p->se.load.weight;
3865
}
P
Peter Zijlstra 已提交
3866
#endif
3867 3868 3869 3870 3871 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883

/********** Helpers for find_busiest_group ************************/
/*
 * sd_lb_stats - Structure to store the statistics of a sched_domain
 * 		during load balancing.
 */
struct sd_lb_stats {
	struct sched_group *busiest; /* Busiest group in this sd */
	struct sched_group *this;  /* Local group in this sd */
	unsigned long total_load;  /* Total load of all groups in sd */
	unsigned long total_pwr;   /*	Total power of all groups in sd */
	unsigned long avg_load;	   /* Average load across all groups in sd */

	/** Statistics of this group */
	unsigned long this_load;
	unsigned long this_load_per_task;
	unsigned long this_nr_running;
3884
	unsigned long this_has_capacity;
3885
	unsigned int  this_idle_cpus;
3886 3887

	/* Statistics of the busiest group */
3888
	unsigned int  busiest_idle_cpus;
3889 3890 3891
	unsigned long max_load;
	unsigned long busiest_load_per_task;
	unsigned long busiest_nr_running;
3892
	unsigned long busiest_group_capacity;
3893
	unsigned long busiest_has_capacity;
3894
	unsigned int  busiest_group_weight;
3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907

	int group_imb; /* Is there imbalance in this sd */
};

/*
 * sg_lb_stats - stats of a sched_group required for load_balancing
 */
struct sg_lb_stats {
	unsigned long avg_load; /*Avg load across the CPUs of the group */
	unsigned long group_load; /* Total load over the CPUs of the group */
	unsigned long sum_nr_running; /* Nr tasks running in the group */
	unsigned long sum_weighted_load; /* Weighted load of group's tasks */
	unsigned long group_capacity;
3908 3909
	unsigned long idle_cpus;
	unsigned long group_weight;
3910
	int group_imb; /* Is there an imbalance in the group ? */
3911
	int group_has_capacity; /* Is there extra capacity in the group? */
3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941
};

/**
 * get_sd_load_idx - Obtain the load index for a given sched domain.
 * @sd: The sched_domain whose load_idx is to be obtained.
 * @idle: The Idle status of the CPU for whose sd load_icx is obtained.
 */
static inline int get_sd_load_idx(struct sched_domain *sd,
					enum cpu_idle_type idle)
{
	int load_idx;

	switch (idle) {
	case CPU_NOT_IDLE:
		load_idx = sd->busy_idx;
		break;

	case CPU_NEWLY_IDLE:
		load_idx = sd->newidle_idx;
		break;
	default:
		load_idx = sd->idle_idx;
		break;
	}

	return load_idx;
}

unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
{
3942
	return SCHED_POWER_SCALE;
3943 3944 3945 3946 3947 3948 3949 3950 3951
}

unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
{
	return default_scale_freq_power(sd, cpu);
}

unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
{
3952
	unsigned long weight = sd->span_weight;
3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966 3967
	unsigned long smt_gain = sd->smt_gain;

	smt_gain /= weight;

	return smt_gain;
}

unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
{
	return default_scale_smt_power(sd, cpu);
}

unsigned long scale_rt_power(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
3968
	u64 total, available, age_stamp, avg;
3969

3970 3971 3972 3973 3974 3975 3976 3977
	/*
	 * Since we're reading these variables without serialization make sure
	 * we read them once before doing sanity checks on them.
	 */
	age_stamp = ACCESS_ONCE(rq->age_stamp);
	avg = ACCESS_ONCE(rq->rt_avg);

	total = sched_avg_period() + (rq->clock - age_stamp);
3978

3979
	if (unlikely(total < avg)) {
3980 3981 3982
		/* Ensures that power won't end up being negative */
		available = 0;
	} else {
3983
		available = total - avg;
3984
	}
3985

3986 3987
	if (unlikely((s64)total < SCHED_POWER_SCALE))
		total = SCHED_POWER_SCALE;
3988

3989
	total >>= SCHED_POWER_SHIFT;
3990 3991 3992 3993 3994 3995

	return div_u64(available, total);
}

static void update_cpu_power(struct sched_domain *sd, int cpu)
{
3996
	unsigned long weight = sd->span_weight;
3997
	unsigned long power = SCHED_POWER_SCALE;
3998 3999 4000 4001 4002 4003 4004 4005
	struct sched_group *sdg = sd->groups;

	if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
		if (sched_feat(ARCH_POWER))
			power *= arch_scale_smt_power(sd, cpu);
		else
			power *= default_scale_smt_power(sd, cpu);

4006
		power >>= SCHED_POWER_SHIFT;
4007 4008
	}

4009
	sdg->sgp->power_orig = power;
4010 4011 4012 4013 4014 4015

	if (sched_feat(ARCH_POWER))
		power *= arch_scale_freq_power(sd, cpu);
	else
		power *= default_scale_freq_power(sd, cpu);

4016
	power >>= SCHED_POWER_SHIFT;
4017

4018
	power *= scale_rt_power(cpu);
4019
	power >>= SCHED_POWER_SHIFT;
4020 4021 4022 4023

	if (!power)
		power = 1;

4024
	cpu_rq(cpu)->cpu_power = power;
4025
	sdg->sgp->power = power;
4026 4027
}

4028
void update_group_power(struct sched_domain *sd, int cpu)
4029 4030 4031 4032
{
	struct sched_domain *child = sd->child;
	struct sched_group *group, *sdg = sd->groups;
	unsigned long power;
4033 4034 4035 4036 4037
	unsigned long interval;

	interval = msecs_to_jiffies(sd->balance_interval);
	interval = clamp(interval, 1UL, max_load_balance_interval);
	sdg->sgp->next_update = jiffies + interval;
4038 4039 4040 4041 4042 4043 4044 4045

	if (!child) {
		update_cpu_power(sd, cpu);
		return;
	}

	power = 0;

P
Peter Zijlstra 已提交
4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065
	if (child->flags & SD_OVERLAP) {
		/*
		 * SD_OVERLAP domains cannot assume that child groups
		 * span the current group.
		 */

		for_each_cpu(cpu, sched_group_cpus(sdg))
			power += power_of(cpu);
	} else  {
		/*
		 * !SD_OVERLAP domains can assume that child groups
		 * span the current group.
		 */ 

		group = child->groups;
		do {
			power += group->sgp->power;
			group = group->next;
		} while (group != child->groups);
	}
4066

4067
	sdg->sgp->power_orig = sdg->sgp->power = power;
4068 4069
}

4070 4071 4072 4073 4074 4075 4076 4077 4078 4079 4080
/*
 * Try and fix up capacity for tiny siblings, this is needed when
 * things like SD_ASYM_PACKING need f_b_g to select another sibling
 * which on its own isn't powerful enough.
 *
 * See update_sd_pick_busiest() and check_asym_packing().
 */
static inline int
fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
{
	/*
4081
	 * Only siblings can have significantly less than SCHED_POWER_SCALE
4082
	 */
P
Peter Zijlstra 已提交
4083
	if (!(sd->flags & SD_SHARE_CPUPOWER))
4084 4085 4086 4087 4088
		return 0;

	/*
	 * If ~90% of the cpu_power is still there, we're good.
	 */
4089
	if (group->sgp->power * 32 > group->sgp->power_orig * 29)
4090 4091 4092 4093 4094
		return 1;

	return 0;
}

4095 4096
/**
 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
4097
 * @env: The load balancing environment.
4098 4099 4100 4101 4102 4103
 * @group: sched_group whose statistics are to be updated.
 * @load_idx: Load index of sched_domain of this_cpu for load calc.
 * @local_group: Does group contain this_cpu.
 * @balance: Should we balance.
 * @sgs: variable to hold the statistics for this group.
 */
4104 4105
static inline void update_sg_lb_stats(struct lb_env *env,
			struct sched_group *group, int load_idx,
4106
			int local_group, int *balance, struct sg_lb_stats *sgs)
4107
{
4108 4109
	unsigned long nr_running, max_nr_running, min_nr_running;
	unsigned long load, max_cpu_load, min_cpu_load;
4110
	unsigned int balance_cpu = -1, first_idle_cpu = 0;
4111
	unsigned long avg_load_per_task = 0;
4112
	int i;
4113

4114
	if (local_group)
P
Peter Zijlstra 已提交
4115
		balance_cpu = group_balance_cpu(group);
4116 4117 4118 4119

	/* Tally up the load of all CPUs in the group */
	max_cpu_load = 0;
	min_cpu_load = ~0UL;
4120
	max_nr_running = 0;
4121
	min_nr_running = ~0UL;
4122

4123
	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
4124 4125
		struct rq *rq = cpu_rq(i);

4126 4127
		nr_running = rq->nr_running;

4128 4129
		/* Bias balancing toward cpus of our domain */
		if (local_group) {
P
Peter Zijlstra 已提交
4130 4131
			if (idle_cpu(i) && !first_idle_cpu &&
					cpumask_test_cpu(i, sched_group_mask(group))) {
4132
				first_idle_cpu = 1;
4133 4134
				balance_cpu = i;
			}
4135 4136

			load = target_load(i, load_idx);
4137 4138
		} else {
			load = source_load(i, load_idx);
4139
			if (load > max_cpu_load)
4140 4141 4142
				max_cpu_load = load;
			if (min_cpu_load > load)
				min_cpu_load = load;
4143 4144 4145 4146 4147

			if (nr_running > max_nr_running)
				max_nr_running = nr_running;
			if (min_nr_running > nr_running)
				min_nr_running = nr_running;
4148 4149 4150
		}

		sgs->group_load += load;
4151
		sgs->sum_nr_running += nr_running;
4152
		sgs->sum_weighted_load += weighted_cpuload(i);
4153 4154
		if (idle_cpu(i))
			sgs->idle_cpus++;
4155 4156 4157 4158 4159 4160 4161 4162
	}

	/*
	 * First idle cpu or the first cpu(busiest) in this sched group
	 * is eligible for doing load balancing at this and above
	 * domains. In the newly idle case, we will allow all the cpu's
	 * to do the newly idle load balance.
	 */
4163
	if (local_group) {
4164
		if (env->idle != CPU_NEWLY_IDLE) {
4165
			if (balance_cpu != env->dst_cpu) {
4166 4167 4168
				*balance = 0;
				return;
			}
4169
			update_group_power(env->sd, env->dst_cpu);
4170
		} else if (time_after_eq(jiffies, group->sgp->next_update))
4171
			update_group_power(env->sd, env->dst_cpu);
4172 4173 4174
	}

	/* Adjust by relative CPU power of the group */
4175
	sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
4176 4177 4178

	/*
	 * Consider the group unbalanced when the imbalance is larger
P
Peter Zijlstra 已提交
4179
	 * than the average weight of a task.
4180 4181 4182 4183 4184 4185
	 *
	 * APZ: with cgroup the avg task weight can vary wildly and
	 *      might not be a suitable number - should we keep a
	 *      normalized nr_running number somewhere that negates
	 *      the hierarchy?
	 */
4186 4187
	if (sgs->sum_nr_running)
		avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
4188

4189 4190
	if ((max_cpu_load - min_cpu_load) >= avg_load_per_task &&
	    (max_nr_running - min_nr_running) > 1)
4191 4192
		sgs->group_imb = 1;

4193
	sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
4194
						SCHED_POWER_SCALE);
4195
	if (!sgs->group_capacity)
4196
		sgs->group_capacity = fix_small_capacity(env->sd, group);
4197
	sgs->group_weight = group->group_weight;
4198 4199 4200

	if (sgs->group_capacity > sgs->sum_nr_running)
		sgs->group_has_capacity = 1;
4201 4202
}

4203 4204
/**
 * update_sd_pick_busiest - return 1 on busiest group
4205
 * @env: The load balancing environment.
4206 4207
 * @sds: sched_domain statistics
 * @sg: sched_group candidate to be checked for being the busiest
4208
 * @sgs: sched_group statistics
4209 4210 4211 4212
 *
 * Determine if @sg is a busier group than the previously selected
 * busiest group.
 */
4213
static bool update_sd_pick_busiest(struct lb_env *env,
4214 4215
				   struct sd_lb_stats *sds,
				   struct sched_group *sg,
4216
				   struct sg_lb_stats *sgs)
4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231
{
	if (sgs->avg_load <= sds->max_load)
		return false;

	if (sgs->sum_nr_running > sgs->group_capacity)
		return true;

	if (sgs->group_imb)
		return true;

	/*
	 * ASYM_PACKING needs to move all the work to the lowest
	 * numbered CPUs in the group, therefore mark all groups
	 * higher than ourself as busy.
	 */
4232 4233
	if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
	    env->dst_cpu < group_first_cpu(sg)) {
4234 4235 4236 4237 4238 4239 4240 4241 4242 4243
		if (!sds->busiest)
			return true;

		if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
			return true;
	}

	return false;
}

4244
/**
4245
 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
4246
 * @env: The load balancing environment.
4247 4248 4249
 * @balance: Should we balance.
 * @sds: variable to hold the statistics for this sched_domain.
 */
4250
static inline void update_sd_lb_stats(struct lb_env *env,
4251
					int *balance, struct sd_lb_stats *sds)
4252
{
4253 4254
	struct sched_domain *child = env->sd->child;
	struct sched_group *sg = env->sd->groups;
4255 4256 4257 4258 4259 4260
	struct sg_lb_stats sgs;
	int load_idx, prefer_sibling = 0;

	if (child && child->flags & SD_PREFER_SIBLING)
		prefer_sibling = 1;

4261
	load_idx = get_sd_load_idx(env->sd, env->idle);
4262 4263 4264 4265

	do {
		int local_group;

4266
		local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
4267
		memset(&sgs, 0, sizeof(sgs));
4268
		update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs);
4269

P
Peter Zijlstra 已提交
4270
		if (local_group && !(*balance))
4271 4272 4273
			return;

		sds->total_load += sgs.group_load;
4274
		sds->total_pwr += sg->sgp->power;
4275 4276 4277

		/*
		 * In case the child domain prefers tasks go to siblings
4278
		 * first, lower the sg capacity to one so that we'll try
4279 4280 4281 4282 4283 4284
		 * and move all the excess tasks away. We lower the capacity
		 * of a group only if the local group has the capacity to fit
		 * these excess tasks, i.e. nr_running < group_capacity. The
		 * extra check prevents the case where you always pull from the
		 * heaviest group when it is already under-utilized (possible
		 * with a large weight task outweighs the tasks on the system).
4285
		 */
4286
		if (prefer_sibling && !local_group && sds->this_has_capacity)
4287 4288 4289 4290
			sgs.group_capacity = min(sgs.group_capacity, 1UL);

		if (local_group) {
			sds->this_load = sgs.avg_load;
4291
			sds->this = sg;
4292 4293
			sds->this_nr_running = sgs.sum_nr_running;
			sds->this_load_per_task = sgs.sum_weighted_load;
4294
			sds->this_has_capacity = sgs.group_has_capacity;
4295
			sds->this_idle_cpus = sgs.idle_cpus;
4296
		} else if (update_sd_pick_busiest(env, sds, sg, &sgs)) {
4297
			sds->max_load = sgs.avg_load;
4298
			sds->busiest = sg;
4299
			sds->busiest_nr_running = sgs.sum_nr_running;
4300
			sds->busiest_idle_cpus = sgs.idle_cpus;
4301
			sds->busiest_group_capacity = sgs.group_capacity;
4302
			sds->busiest_load_per_task = sgs.sum_weighted_load;
4303
			sds->busiest_has_capacity = sgs.group_has_capacity;
4304
			sds->busiest_group_weight = sgs.group_weight;
4305 4306 4307
			sds->group_imb = sgs.group_imb;
		}

4308
		sg = sg->next;
4309
	} while (sg != env->sd->groups);
4310 4311 4312 4313 4314 4315 4316 4317 4318 4319 4320 4321 4322 4323 4324 4325 4326 4327 4328
}

/**
 * check_asym_packing - Check to see if the group is packed into the
 *			sched doman.
 *
 * This is primarily intended to used at the sibling level.  Some
 * cores like POWER7 prefer to use lower numbered SMT threads.  In the
 * case of POWER7, it can move to lower SMT modes only when higher
 * threads are idle.  When in lower SMT modes, the threads will
 * perform better since they share less core resources.  Hence when we
 * have idle threads, we want them to be the higher ones.
 *
 * This packing function is run on idle threads.  It checks to see if
 * the busiest CPU in this domain (core in the P7 case) has a higher
 * CPU number than the packing function is being run on.  Here we are
 * assuming lower CPU number will be equivalent to lower a SMT thread
 * number.
 *
4329 4330 4331
 * Returns 1 when packing is required and a task should be moved to
 * this CPU.  The amount of the imbalance is returned in *imbalance.
 *
4332
 * @env: The load balancing environment.
4333 4334
 * @sds: Statistics of the sched_domain which is to be packed
 */
4335
static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
4336 4337 4338
{
	int busiest_cpu;

4339
	if (!(env->sd->flags & SD_ASYM_PACKING))
4340 4341 4342 4343 4344 4345
		return 0;

	if (!sds->busiest)
		return 0;

	busiest_cpu = group_first_cpu(sds->busiest);
4346
	if (env->dst_cpu > busiest_cpu)
4347 4348
		return 0;

4349 4350 4351
	env->imbalance = DIV_ROUND_CLOSEST(
		sds->max_load * sds->busiest->sgp->power, SCHED_POWER_SCALE);

4352
	return 1;
4353 4354 4355 4356 4357 4358
}

/**
 * fix_small_imbalance - Calculate the minor imbalance that exists
 *			amongst the groups of a sched_domain, during
 *			load balancing.
4359
 * @env: The load balancing environment.
4360 4361
 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
 */
4362 4363
static inline
void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
4364 4365 4366
{
	unsigned long tmp, pwr_now = 0, pwr_move = 0;
	unsigned int imbn = 2;
4367
	unsigned long scaled_busy_load_per_task;
4368 4369 4370 4371 4372 4373

	if (sds->this_nr_running) {
		sds->this_load_per_task /= sds->this_nr_running;
		if (sds->busiest_load_per_task >
				sds->this_load_per_task)
			imbn = 1;
4374
	} else {
4375
		sds->this_load_per_task =
4376 4377
			cpu_avg_load_per_task(env->dst_cpu);
	}
4378

4379
	scaled_busy_load_per_task = sds->busiest_load_per_task
4380
					 * SCHED_POWER_SCALE;
4381
	scaled_busy_load_per_task /= sds->busiest->sgp->power;
4382 4383 4384

	if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
			(scaled_busy_load_per_task * imbn)) {
4385
		env->imbalance = sds->busiest_load_per_task;
4386 4387 4388 4389 4390 4391 4392 4393 4394
		return;
	}

	/*
	 * OK, we don't have enough imbalance to justify moving tasks,
	 * however we may be able to increase total CPU power used by
	 * moving them.
	 */

4395
	pwr_now += sds->busiest->sgp->power *
4396
			min(sds->busiest_load_per_task, sds->max_load);
4397
	pwr_now += sds->this->sgp->power *
4398
			min(sds->this_load_per_task, sds->this_load);
4399
	pwr_now /= SCHED_POWER_SCALE;
4400 4401

	/* Amount of load we'd subtract */
4402
	tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
4403
		sds->busiest->sgp->power;
4404
	if (sds->max_load > tmp)
4405
		pwr_move += sds->busiest->sgp->power *
4406 4407 4408
			min(sds->busiest_load_per_task, sds->max_load - tmp);

	/* Amount of load we'd add */
4409
	if (sds->max_load * sds->busiest->sgp->power <
4410
		sds->busiest_load_per_task * SCHED_POWER_SCALE)
4411 4412
		tmp = (sds->max_load * sds->busiest->sgp->power) /
			sds->this->sgp->power;
4413
	else
4414
		tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
4415 4416
			sds->this->sgp->power;
	pwr_move += sds->this->sgp->power *
4417
			min(sds->this_load_per_task, sds->this_load + tmp);
4418
	pwr_move /= SCHED_POWER_SCALE;
4419 4420 4421

	/* Move if we gain throughput */
	if (pwr_move > pwr_now)
4422
		env->imbalance = sds->busiest_load_per_task;
4423 4424 4425 4426 4427
}

/**
 * calculate_imbalance - Calculate the amount of imbalance present within the
 *			 groups of a given sched_domain during load balance.
4428
 * @env: load balance environment
4429 4430
 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
 */
4431
static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
4432
{
4433 4434 4435 4436 4437 4438 4439 4440
	unsigned long max_pull, load_above_capacity = ~0UL;

	sds->busiest_load_per_task /= sds->busiest_nr_running;
	if (sds->group_imb) {
		sds->busiest_load_per_task =
			min(sds->busiest_load_per_task, sds->avg_load);
	}

4441 4442 4443 4444 4445 4446
	/*
	 * In the presence of smp nice balancing, certain scenarios can have
	 * max load less than avg load(as we skip the groups at or below
	 * its cpu_power, while calculating max_load..)
	 */
	if (sds->max_load < sds->avg_load) {
4447 4448
		env->imbalance = 0;
		return fix_small_imbalance(env, sds);
4449 4450
	}

4451 4452 4453 4454 4455 4456 4457
	if (!sds->group_imb) {
		/*
		 * Don't want to pull so many tasks that a group would go idle.
		 */
		load_above_capacity = (sds->busiest_nr_running -
						sds->busiest_group_capacity);

4458
		load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
4459

4460
		load_above_capacity /= sds->busiest->sgp->power;
4461 4462 4463 4464 4465 4466 4467 4468 4469 4470 4471 4472 4473
	}

	/*
	 * We're trying to get all the cpus to the average_load, so we don't
	 * want to push ourselves above the average load, nor do we wish to
	 * reduce the max loaded cpu below the average load. At the same time,
	 * we also don't want to reduce the group load below the group capacity
	 * (so that we can implement power-savings policies etc). Thus we look
	 * for the minimum possible imbalance.
	 * Be careful of negative numbers as they'll appear as very large values
	 * with unsigned longs.
	 */
	max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
4474 4475

	/* How much load to actually move to equalise the imbalance */
4476
	env->imbalance = min(max_pull * sds->busiest->sgp->power,
4477
		(sds->avg_load - sds->this_load) * sds->this->sgp->power)
4478
			/ SCHED_POWER_SCALE;
4479 4480 4481

	/*
	 * if *imbalance is less than the average load per runnable task
L
Lucas De Marchi 已提交
4482
	 * there is no guarantee that any tasks will be moved so we'll have
4483 4484 4485
	 * a think about bumping its value to force at least one task to be
	 * moved
	 */
4486 4487
	if (env->imbalance < sds->busiest_load_per_task)
		return fix_small_imbalance(env, sds);
4488 4489

}
4490

4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502
/******* find_busiest_group() helpers end here *********************/

/**
 * find_busiest_group - Returns the busiest group within the sched_domain
 * if there is an imbalance. If there isn't an imbalance, and
 * the user has opted for power-savings, it returns a group whose
 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
 * such a group exists.
 *
 * Also calculates the amount of weighted load which should be moved
 * to restore balance.
 *
4503
 * @env: The load balancing environment.
4504 4505 4506 4507 4508 4509 4510 4511 4512
 * @balance: Pointer to a variable indicating if this_cpu
 *	is the appropriate cpu to perform load balancing at this_level.
 *
 * Returns:	- the busiest group if imbalance exists.
 *		- If no imbalance and user has opted for power-savings balance,
 *		   return the least loaded group whose CPUs can be
 *		   put to idle by rebalancing its tasks onto our group.
 */
static struct sched_group *
4513
find_busiest_group(struct lb_env *env, int *balance)
4514 4515 4516 4517 4518 4519 4520 4521 4522
{
	struct sd_lb_stats sds;

	memset(&sds, 0, sizeof(sds));

	/*
	 * Compute the various statistics relavent for load balancing at
	 * this level.
	 */
4523
	update_sd_lb_stats(env, balance, &sds);
4524

4525 4526 4527
	/*
	 * this_cpu is not the appropriate cpu to perform load balancing at
	 * this level.
4528
	 */
P
Peter Zijlstra 已提交
4529
	if (!(*balance))
4530 4531
		goto ret;

4532 4533
	if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
	    check_asym_packing(env, &sds))
4534 4535
		return sds.busiest;

4536
	/* There is no busy sibling group to pull tasks from */
4537 4538 4539
	if (!sds.busiest || sds.busiest_nr_running == 0)
		goto out_balanced;

4540
	sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
4541

P
Peter Zijlstra 已提交
4542 4543 4544 4545 4546 4547 4548 4549
	/*
	 * If the busiest group is imbalanced the below checks don't
	 * work because they assumes all things are equal, which typically
	 * isn't true due to cpus_allowed constraints and the like.
	 */
	if (sds.group_imb)
		goto force_balance;

4550
	/* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
4551
	if (env->idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
4552 4553 4554
			!sds.busiest_has_capacity)
		goto force_balance;

4555 4556 4557 4558
	/*
	 * If the local group is more busy than the selected busiest group
	 * don't try and pull any tasks.
	 */
4559 4560 4561
	if (sds.this_load >= sds.max_load)
		goto out_balanced;

4562 4563 4564 4565
	/*
	 * Don't pull any tasks if this group is already above the domain
	 * average load.
	 */
4566 4567 4568
	if (sds.this_load >= sds.avg_load)
		goto out_balanced;

4569
	if (env->idle == CPU_IDLE) {
4570 4571 4572 4573 4574 4575
		/*
		 * This cpu is idle. If the busiest group load doesn't
		 * have more tasks than the number of available cpu's and
		 * there is no imbalance between this and busiest group
		 * wrt to idle cpu's, it is balanced.
		 */
4576
		if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
4577 4578
		    sds.busiest_nr_running <= sds.busiest_group_weight)
			goto out_balanced;
4579 4580 4581 4582 4583
	} else {
		/*
		 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
		 * imbalance_pct to be conservative.
		 */
4584
		if (100 * sds.max_load <= env->sd->imbalance_pct * sds.this_load)
4585
			goto out_balanced;
4586
	}
4587

4588
force_balance:
4589
	/* Looks like there is an imbalance. Compute it */
4590
	calculate_imbalance(env, &sds);
4591 4592 4593 4594
	return sds.busiest;

out_balanced:
ret:
4595
	env->imbalance = 0;
4596 4597 4598 4599 4600 4601
	return NULL;
}

/*
 * find_busiest_queue - find the busiest runqueue among the cpus in group.
 */
4602
static struct rq *find_busiest_queue(struct lb_env *env,
4603
				     struct sched_group *group)
4604 4605 4606 4607 4608 4609 4610
{
	struct rq *busiest = NULL, *rq;
	unsigned long max_load = 0;
	int i;

	for_each_cpu(i, sched_group_cpus(group)) {
		unsigned long power = power_of(i);
4611 4612
		unsigned long capacity = DIV_ROUND_CLOSEST(power,
							   SCHED_POWER_SCALE);
4613 4614
		unsigned long wl;

4615
		if (!capacity)
4616
			capacity = fix_small_capacity(env->sd, group);
4617

4618
		if (!cpumask_test_cpu(i, env->cpus))
4619 4620 4621
			continue;

		rq = cpu_rq(i);
4622
		wl = weighted_cpuload(i);
4623

4624 4625 4626 4627
		/*
		 * When comparing with imbalance, use weighted_cpuload()
		 * which is not scaled with the cpu power.
		 */
4628
		if (capacity && rq->nr_running == 1 && wl > env->imbalance)
4629 4630
			continue;

4631 4632 4633 4634 4635 4636
		/*
		 * For the load comparisons with the other cpu's, consider
		 * the weighted_cpuload() scaled with the cpu power, so that
		 * the load can be moved away from the cpu that is potentially
		 * running at a lower capacity.
		 */
4637
		wl = (wl * SCHED_POWER_SCALE) / power;
4638

4639 4640 4641 4642 4643 4644 4645 4646 4647 4648 4649 4650 4651 4652 4653 4654
		if (wl > max_load) {
			max_load = wl;
			busiest = rq;
		}
	}

	return busiest;
}

/*
 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
 * so long as it is large enough.
 */
#define MAX_PINNED_INTERVAL	512

/* Working cpumask for load_balance and load_balance_newidle. */
4655
DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
4656

4657
static int need_active_balance(struct lb_env *env)
4658
{
4659 4660 4661
	struct sched_domain *sd = env->sd;

	if (env->idle == CPU_NEWLY_IDLE) {
4662 4663 4664 4665 4666 4667

		/*
		 * ASYM_PACKING needs to force migrate tasks from busy but
		 * higher numbered CPUs in order to pack all tasks in the
		 * lowest numbered CPUs.
		 */
4668
		if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
4669
			return 1;
4670 4671 4672 4673 4674
	}

	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
}

4675 4676
static int active_load_balance_cpu_stop(void *data);

4677 4678 4679 4680 4681 4682 4683 4684
/*
 * Check this_cpu to ensure it is balanced within domain. Attempt to move
 * tasks if there is an imbalance.
 */
static int load_balance(int this_cpu, struct rq *this_rq,
			struct sched_domain *sd, enum cpu_idle_type idle,
			int *balance)
{
4685 4686
	int ld_moved, cur_ld_moved, active_balance = 0;
	int lb_iterations, max_lb_iterations;
4687 4688 4689 4690 4691
	struct sched_group *group;
	struct rq *busiest;
	unsigned long flags;
	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);

4692 4693
	struct lb_env env = {
		.sd		= sd,
4694 4695
		.dst_cpu	= this_cpu,
		.dst_rq		= this_rq,
4696
		.dst_grpmask    = sched_group_cpus(sd->groups),
4697
		.idle		= idle,
4698
		.loop_break	= sched_nr_migrate_break,
4699
		.cpus		= cpus,
4700 4701
	};

4702
	cpumask_copy(cpus, cpu_active_mask);
4703
	max_lb_iterations = cpumask_weight(env.dst_grpmask);
4704 4705 4706 4707

	schedstat_inc(sd, lb_count[idle]);

redo:
4708
	group = find_busiest_group(&env, balance);
4709 4710 4711 4712 4713 4714 4715 4716 4717

	if (*balance == 0)
		goto out_balanced;

	if (!group) {
		schedstat_inc(sd, lb_nobusyg[idle]);
		goto out_balanced;
	}

4718
	busiest = find_busiest_queue(&env, group);
4719 4720 4721 4722 4723
	if (!busiest) {
		schedstat_inc(sd, lb_nobusyq[idle]);
		goto out_balanced;
	}

4724
	BUG_ON(busiest == env.dst_rq);
4725

4726
	schedstat_add(sd, lb_imbalance[idle], env.imbalance);
4727 4728

	ld_moved = 0;
4729
	lb_iterations = 1;
4730 4731 4732 4733 4734 4735 4736
	if (busiest->nr_running > 1) {
		/*
		 * Attempt to move tasks. If find_busiest_group has found
		 * an imbalance but busiest->nr_running <= 1, the group is
		 * still unbalanced. ld_moved simply stays zero, so it is
		 * correctly treated as an imbalance.
		 */
4737
		env.flags |= LBF_ALL_PINNED;
4738 4739 4740
		env.src_cpu   = busiest->cpu;
		env.src_rq    = busiest;
		env.loop_max  = min(sysctl_sched_nr_migrate, busiest->nr_running);
4741

4742
		update_h_load(env.src_cpu);
4743
more_balance:
4744
		local_irq_save(flags);
4745
		double_rq_lock(env.dst_rq, busiest);
4746 4747 4748 4749 4750 4751 4752

		/*
		 * cur_ld_moved - load moved in current iteration
		 * ld_moved     - cumulative load moved across iterations
		 */
		cur_ld_moved = move_tasks(&env);
		ld_moved += cur_ld_moved;
4753
		double_rq_unlock(env.dst_rq, busiest);
4754 4755
		local_irq_restore(flags);

4756 4757 4758 4759 4760
		if (env.flags & LBF_NEED_BREAK) {
			env.flags &= ~LBF_NEED_BREAK;
			goto more_balance;
		}

4761 4762 4763
		/*
		 * some other cpu did the load balance for us.
		 */
4764 4765 4766 4767 4768 4769 4770 4771 4772 4773 4774 4775 4776 4777 4778 4779 4780 4781 4782 4783 4784 4785 4786 4787 4788
		if (cur_ld_moved && env.dst_cpu != smp_processor_id())
			resched_cpu(env.dst_cpu);

		/*
		 * Revisit (affine) tasks on src_cpu that couldn't be moved to
		 * us and move them to an alternate dst_cpu in our sched_group
		 * where they can run. The upper limit on how many times we
		 * iterate on same src_cpu is dependent on number of cpus in our
		 * sched_group.
		 *
		 * This changes load balance semantics a bit on who can move
		 * load to a given_cpu. In addition to the given_cpu itself
		 * (or a ilb_cpu acting on its behalf where given_cpu is
		 * nohz-idle), we now have balance_cpu in a position to move
		 * load to given_cpu. In rare situations, this may cause
		 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
		 * _independently_ and at _same_ time to move some load to
		 * given_cpu) causing exceess load to be moved to given_cpu.
		 * This however should not happen so much in practice and
		 * moreover subsequent load balance cycles should correct the
		 * excess load moved.
		 */
		if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0 &&
				lb_iterations++ < max_lb_iterations) {

4789
			env.dst_rq	 = cpu_rq(env.new_dst_cpu);
4790 4791 4792 4793 4794 4795 4796 4797 4798 4799
			env.dst_cpu	 = env.new_dst_cpu;
			env.flags	&= ~LBF_SOME_PINNED;
			env.loop	 = 0;
			env.loop_break	 = sched_nr_migrate_break;
			/*
			 * Go back to "more_balance" rather than "redo" since we
			 * need to continue with same src_cpu.
			 */
			goto more_balance;
		}
4800 4801

		/* All tasks on this runqueue were pinned by CPU affinity */
4802
		if (unlikely(env.flags & LBF_ALL_PINNED)) {
4803
			cpumask_clear_cpu(cpu_of(busiest), cpus);
4804 4805 4806
			if (!cpumask_empty(cpus)) {
				env.loop = 0;
				env.loop_break = sched_nr_migrate_break;
4807
				goto redo;
4808
			}
4809 4810 4811 4812 4813 4814
			goto out_balanced;
		}
	}

	if (!ld_moved) {
		schedstat_inc(sd, lb_failed[idle]);
4815 4816 4817 4818 4819 4820 4821 4822
		/*
		 * Increment the failure counter only on periodic balance.
		 * We do not want newidle balance, which can be very
		 * frequent, pollute the failure counter causing
		 * excessive cache_hot migrations and active balances.
		 */
		if (idle != CPU_NEWLY_IDLE)
			sd->nr_balance_failed++;
4823

4824
		if (need_active_balance(&env)) {
4825 4826
			raw_spin_lock_irqsave(&busiest->lock, flags);

4827 4828 4829
			/* don't kick the active_load_balance_cpu_stop,
			 * if the curr task on busiest cpu can't be
			 * moved to this_cpu
4830 4831
			 */
			if (!cpumask_test_cpu(this_cpu,
4832
					tsk_cpus_allowed(busiest->curr))) {
4833 4834
				raw_spin_unlock_irqrestore(&busiest->lock,
							    flags);
4835
				env.flags |= LBF_ALL_PINNED;
4836 4837 4838
				goto out_one_pinned;
			}

4839 4840 4841 4842 4843
			/*
			 * ->active_balance synchronizes accesses to
			 * ->active_balance_work.  Once set, it's cleared
			 * only after active load balance is finished.
			 */
4844 4845 4846 4847 4848 4849
			if (!busiest->active_balance) {
				busiest->active_balance = 1;
				busiest->push_cpu = this_cpu;
				active_balance = 1;
			}
			raw_spin_unlock_irqrestore(&busiest->lock, flags);
4850

4851
			if (active_balance) {
4852 4853 4854
				stop_one_cpu_nowait(cpu_of(busiest),
					active_load_balance_cpu_stop, busiest,
					&busiest->active_balance_work);
4855
			}
4856 4857 4858 4859 4860 4861 4862 4863 4864 4865 4866 4867 4868 4869 4870 4871 4872 4873 4874 4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887 4888

			/*
			 * We've kicked active balancing, reset the failure
			 * counter.
			 */
			sd->nr_balance_failed = sd->cache_nice_tries+1;
		}
	} else
		sd->nr_balance_failed = 0;

	if (likely(!active_balance)) {
		/* We were unbalanced, so reset the balancing interval */
		sd->balance_interval = sd->min_interval;
	} else {
		/*
		 * If we've begun active balancing, start to back off. This
		 * case may not be covered by the all_pinned logic if there
		 * is only 1 task on the busy runqueue (because we don't call
		 * move_tasks).
		 */
		if (sd->balance_interval < sd->max_interval)
			sd->balance_interval *= 2;
	}

	goto out;

out_balanced:
	schedstat_inc(sd, lb_balanced[idle]);

	sd->nr_balance_failed = 0;

out_one_pinned:
	/* tune up the balancing interval */
4889
	if (((env.flags & LBF_ALL_PINNED) &&
4890
			sd->balance_interval < MAX_PINNED_INTERVAL) ||
4891 4892 4893
			(sd->balance_interval < sd->max_interval))
		sd->balance_interval *= 2;

4894
	ld_moved = 0;
4895 4896 4897 4898 4899 4900 4901 4902
out:
	return ld_moved;
}

/*
 * idle_balance is called by schedule() if this_cpu is about to become
 * idle. Attempts to pull tasks from other CPUs.
 */
4903
void idle_balance(int this_cpu, struct rq *this_rq)
4904 4905 4906 4907 4908 4909 4910 4911 4912 4913
{
	struct sched_domain *sd;
	int pulled_task = 0;
	unsigned long next_balance = jiffies + HZ;

	this_rq->idle_stamp = this_rq->clock;

	if (this_rq->avg_idle < sysctl_sched_migration_cost)
		return;

4914 4915
	update_rq_runnable_avg(this_rq, 1);

4916 4917 4918 4919 4920
	/*
	 * Drop the rq->lock, but keep IRQ/preempt disabled.
	 */
	raw_spin_unlock(&this_rq->lock);

P
Paul Turner 已提交
4921
	update_shares(this_cpu);
4922
	rcu_read_lock();
4923 4924
	for_each_domain(this_cpu, sd) {
		unsigned long interval;
4925
		int balance = 1;
4926 4927 4928 4929

		if (!(sd->flags & SD_LOAD_BALANCE))
			continue;

4930
		if (sd->flags & SD_BALANCE_NEWIDLE) {
4931
			/* If we've pulled tasks over stop searching: */
4932 4933 4934
			pulled_task = load_balance(this_cpu, this_rq,
						   sd, CPU_NEWLY_IDLE, &balance);
		}
4935 4936 4937 4938

		interval = msecs_to_jiffies(sd->balance_interval);
		if (time_after(next_balance, sd->last_balance + interval))
			next_balance = sd->last_balance + interval;
N
Nikhil Rao 已提交
4939 4940
		if (pulled_task) {
			this_rq->idle_stamp = 0;
4941
			break;
N
Nikhil Rao 已提交
4942
		}
4943
	}
4944
	rcu_read_unlock();
4945 4946 4947

	raw_spin_lock(&this_rq->lock);

4948 4949 4950 4951 4952 4953 4954 4955 4956 4957
	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
		/*
		 * We are going idle. next_balance may be set based on
		 * a busy processor. So reset next_balance.
		 */
		this_rq->next_balance = next_balance;
	}
}

/*
4958 4959 4960 4961
 * active_load_balance_cpu_stop is run by cpu stopper. It pushes
 * running tasks off the busiest CPU onto idle CPUs. It requires at
 * least 1 task to be running on each physical CPU where possible, and
 * avoids physical / logical imbalances.
4962
 */
4963
static int active_load_balance_cpu_stop(void *data)
4964
{
4965 4966
	struct rq *busiest_rq = data;
	int busiest_cpu = cpu_of(busiest_rq);
4967
	int target_cpu = busiest_rq->push_cpu;
4968
	struct rq *target_rq = cpu_rq(target_cpu);
4969
	struct sched_domain *sd;
4970 4971 4972 4973 4974 4975 4976

	raw_spin_lock_irq(&busiest_rq->lock);

	/* make sure the requested cpu hasn't gone down in the meantime */
	if (unlikely(busiest_cpu != smp_processor_id() ||
		     !busiest_rq->active_balance))
		goto out_unlock;
4977 4978 4979

	/* Is there any task to move? */
	if (busiest_rq->nr_running <= 1)
4980
		goto out_unlock;
4981 4982 4983 4984 4985 4986 4987 4988 4989 4990 4991 4992

	/*
	 * This condition is "impossible", if it occurs
	 * we need to fix it. Originally reported by
	 * Bjorn Helgaas on a 128-cpu setup.
	 */
	BUG_ON(busiest_rq == target_rq);

	/* move a task from busiest_rq to target_rq */
	double_lock_balance(busiest_rq, target_rq);

	/* Search for an sd spanning us and the target CPU. */
4993
	rcu_read_lock();
4994 4995 4996 4997 4998 4999 5000
	for_each_domain(target_cpu, sd) {
		if ((sd->flags & SD_LOAD_BALANCE) &&
		    cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
				break;
	}

	if (likely(sd)) {
5001 5002
		struct lb_env env = {
			.sd		= sd,
5003 5004 5005 5006
			.dst_cpu	= target_cpu,
			.dst_rq		= target_rq,
			.src_cpu	= busiest_rq->cpu,
			.src_rq		= busiest_rq,
5007 5008 5009
			.idle		= CPU_IDLE,
		};

5010 5011
		schedstat_inc(sd, alb_count);

5012
		if (move_one_task(&env))
5013 5014 5015 5016
			schedstat_inc(sd, alb_pushed);
		else
			schedstat_inc(sd, alb_failed);
	}
5017
	rcu_read_unlock();
5018
	double_unlock_balance(busiest_rq, target_rq);
5019 5020 5021 5022
out_unlock:
	busiest_rq->active_balance = 0;
	raw_spin_unlock_irq(&busiest_rq->lock);
	return 0;
5023 5024 5025
}

#ifdef CONFIG_NO_HZ
5026 5027 5028 5029 5030 5031
/*
 * idle load balancing details
 * - When one of the busy CPUs notice that there may be an idle rebalancing
 *   needed, they will kick the idle load balancer, which then does idle
 *   load balancing for all the idle CPUs.
 */
5032
static struct {
5033
	cpumask_var_t idle_cpus_mask;
5034
	atomic_t nr_cpus;
5035 5036
	unsigned long next_balance;     /* in jiffy units */
} nohz ____cacheline_aligned;
5037

5038
static inline int find_new_ilb(int call_cpu)
5039
{
5040
	int ilb = cpumask_first(nohz.idle_cpus_mask);
5041

5042 5043 5044 5045
	if (ilb < nr_cpu_ids && idle_cpu(ilb))
		return ilb;

	return nr_cpu_ids;
5046 5047
}

5048 5049 5050 5051 5052 5053 5054 5055 5056 5057 5058
/*
 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
 * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
 * CPU (if there is one).
 */
static void nohz_balancer_kick(int cpu)
{
	int ilb_cpu;

	nohz.next_balance++;

5059
	ilb_cpu = find_new_ilb(cpu);
5060

5061 5062
	if (ilb_cpu >= nr_cpu_ids)
		return;
5063

5064
	if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
5065 5066 5067 5068 5069 5070 5071 5072
		return;
	/*
	 * Use smp_send_reschedule() instead of resched_cpu().
	 * This way we generate a sched IPI on the target cpu which
	 * is idle. And the softirq performing nohz idle load balance
	 * will be run before returning from the IPI.
	 */
	smp_send_reschedule(ilb_cpu);
5073 5074 5075
	return;
}

5076
static inline void nohz_balance_exit_idle(int cpu)
5077 5078 5079 5080 5081 5082 5083 5084
{
	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
		cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
		atomic_dec(&nohz.nr_cpus);
		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
	}
}

5085 5086 5087 5088 5089 5090 5091 5092 5093 5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104 5105 5106 5107 5108 5109 5110 5111 5112 5113 5114
static inline void set_cpu_sd_state_busy(void)
{
	struct sched_domain *sd;
	int cpu = smp_processor_id();

	if (!test_bit(NOHZ_IDLE, nohz_flags(cpu)))
		return;
	clear_bit(NOHZ_IDLE, nohz_flags(cpu));

	rcu_read_lock();
	for_each_domain(cpu, sd)
		atomic_inc(&sd->groups->sgp->nr_busy_cpus);
	rcu_read_unlock();
}

void set_cpu_sd_state_idle(void)
{
	struct sched_domain *sd;
	int cpu = smp_processor_id();

	if (test_bit(NOHZ_IDLE, nohz_flags(cpu)))
		return;
	set_bit(NOHZ_IDLE, nohz_flags(cpu));

	rcu_read_lock();
	for_each_domain(cpu, sd)
		atomic_dec(&sd->groups->sgp->nr_busy_cpus);
	rcu_read_unlock();
}

5115
/*
5116
 * This routine will record that the cpu is going idle with tick stopped.
5117
 * This info will be used in performing idle load balancing in the future.
5118
 */
5119
void nohz_balance_enter_idle(int cpu)
5120
{
5121 5122 5123 5124 5125 5126
	/*
	 * If this cpu is going down, then nothing needs to be done.
	 */
	if (!cpu_active(cpu))
		return;

5127 5128
	if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
		return;
5129

5130 5131 5132
	cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
	atomic_inc(&nohz.nr_cpus);
	set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
5133
}
5134 5135 5136 5137 5138 5139

static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DYING:
5140
		nohz_balance_exit_idle(smp_processor_id());
5141 5142 5143 5144 5145
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}
5146 5147 5148 5149
#endif

static DEFINE_SPINLOCK(balancing);

5150 5151 5152 5153
/*
 * Scale the max load_balance interval with the number of CPUs in the system.
 * This trades load-balance latency on larger machines for less cross talk.
 */
5154
void update_max_interval(void)
5155 5156 5157 5158
{
	max_load_balance_interval = HZ*num_online_cpus()/10;
}

5159 5160 5161 5162 5163 5164 5165 5166 5167 5168 5169
/*
 * It checks each scheduling domain to see if it is due to be balanced,
 * and initiates a balancing operation if so.
 *
 * Balancing parameters are set up in arch_init_sched_domains.
 */
static void rebalance_domains(int cpu, enum cpu_idle_type idle)
{
	int balance = 1;
	struct rq *rq = cpu_rq(cpu);
	unsigned long interval;
5170
	struct sched_domain *sd;
5171 5172 5173 5174 5175
	/* Earliest time when we have to do rebalance again */
	unsigned long next_balance = jiffies + 60*HZ;
	int update_next_balance = 0;
	int need_serialize;

P
Peter Zijlstra 已提交
5176 5177
	update_shares(cpu);

5178
	rcu_read_lock();
5179 5180 5181 5182 5183 5184 5185 5186 5187 5188
	for_each_domain(cpu, sd) {
		if (!(sd->flags & SD_LOAD_BALANCE))
			continue;

		interval = sd->balance_interval;
		if (idle != CPU_IDLE)
			interval *= sd->busy_factor;

		/* scale ms to jiffies */
		interval = msecs_to_jiffies(interval);
5189
		interval = clamp(interval, 1UL, max_load_balance_interval);
5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201

		need_serialize = sd->flags & SD_SERIALIZE;

		if (need_serialize) {
			if (!spin_trylock(&balancing))
				goto out;
		}

		if (time_after_eq(jiffies, sd->last_balance + interval)) {
			if (load_balance(cpu, rq, sd, idle, &balance)) {
				/*
				 * We've pulled tasks over so either we're no
5202
				 * longer idle.
5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220 5221 5222 5223
				 */
				idle = CPU_NOT_IDLE;
			}
			sd->last_balance = jiffies;
		}
		if (need_serialize)
			spin_unlock(&balancing);
out:
		if (time_after(next_balance, sd->last_balance + interval)) {
			next_balance = sd->last_balance + interval;
			update_next_balance = 1;
		}

		/*
		 * Stop the load balance at this level. There is another
		 * CPU in our sched group which is doing load balancing more
		 * actively.
		 */
		if (!balance)
			break;
	}
5224
	rcu_read_unlock();
5225 5226 5227 5228 5229 5230 5231 5232 5233 5234

	/*
	 * next_balance will be updated only when there is a need.
	 * When the cpu is attached to null domain for ex, it will not be
	 * updated.
	 */
	if (likely(update_next_balance))
		rq->next_balance = next_balance;
}

5235
#ifdef CONFIG_NO_HZ
5236
/*
5237
 * In CONFIG_NO_HZ case, the idle balance kickee will do the
5238 5239
 * rebalancing for all the cpus for whom scheduler ticks are stopped.
 */
5240 5241 5242 5243 5244 5245
static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
{
	struct rq *this_rq = cpu_rq(this_cpu);
	struct rq *rq;
	int balance_cpu;

5246 5247 5248
	if (idle != CPU_IDLE ||
	    !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
		goto end;
5249 5250

	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
5251
		if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
5252 5253 5254 5255 5256 5257 5258
			continue;

		/*
		 * If this cpu gets work to do, stop the load balancing
		 * work being done for other cpus. Next load
		 * balancing owner will pick it up.
		 */
5259
		if (need_resched())
5260 5261
			break;

V
Vincent Guittot 已提交
5262 5263 5264 5265 5266 5267
		rq = cpu_rq(balance_cpu);

		raw_spin_lock_irq(&rq->lock);
		update_rq_clock(rq);
		update_idle_cpu_load(rq);
		raw_spin_unlock_irq(&rq->lock);
5268 5269 5270 5271 5272 5273 5274

		rebalance_domains(balance_cpu, CPU_IDLE);

		if (time_after(this_rq->next_balance, rq->next_balance))
			this_rq->next_balance = rq->next_balance;
	}
	nohz.next_balance = this_rq->next_balance;
5275 5276
end:
	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
5277 5278 5279
}

/*
5280 5281 5282 5283 5284 5285 5286
 * Current heuristic for kicking the idle load balancer in the presence
 * of an idle cpu is the system.
 *   - This rq has more than one task.
 *   - At any scheduler domain level, this cpu's scheduler group has multiple
 *     busy cpu's exceeding the group's power.
 *   - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler
 *     domain span are idle.
5287 5288 5289 5290
 */
static inline int nohz_kick_needed(struct rq *rq, int cpu)
{
	unsigned long now = jiffies;
5291
	struct sched_domain *sd;
5292

5293
	if (unlikely(idle_cpu(cpu)))
5294 5295
		return 0;

5296 5297 5298 5299
       /*
	* We may be recently in ticked or tickless idle mode. At the first
	* busy tick after returning from idle, we will update the busy stats.
	*/
5300
	set_cpu_sd_state_busy();
5301
	nohz_balance_exit_idle(cpu);
5302 5303 5304 5305 5306 5307 5308

	/*
	 * None are in tickless mode and hence no need for NOHZ idle load
	 * balancing.
	 */
	if (likely(!atomic_read(&nohz.nr_cpus)))
		return 0;
5309 5310

	if (time_before(now, nohz.next_balance))
5311 5312
		return 0;

5313 5314
	if (rq->nr_running >= 2)
		goto need_kick;
5315

5316
	rcu_read_lock();
5317 5318 5319 5320
	for_each_domain(cpu, sd) {
		struct sched_group *sg = sd->groups;
		struct sched_group_power *sgp = sg->sgp;
		int nr_busy = atomic_read(&sgp->nr_busy_cpus);
5321

5322
		if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
5323
			goto need_kick_unlock;
5324 5325 5326 5327

		if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
		    && (cpumask_first_and(nohz.idle_cpus_mask,
					  sched_domain_span(sd)) < cpu))
5328
			goto need_kick_unlock;
5329 5330 5331

		if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
			break;
5332
	}
5333
	rcu_read_unlock();
5334
	return 0;
5335 5336 5337

need_kick_unlock:
	rcu_read_unlock();
5338 5339
need_kick:
	return 1;
5340 5341 5342 5343 5344 5345 5346 5347 5348
}
#else
static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
#endif

/*
 * run_rebalance_domains is triggered when needed from the scheduler tick.
 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
 */
5349 5350 5351 5352
static void run_rebalance_domains(struct softirq_action *h)
{
	int this_cpu = smp_processor_id();
	struct rq *this_rq = cpu_rq(this_cpu);
5353
	enum cpu_idle_type idle = this_rq->idle_balance ?
5354 5355 5356 5357 5358
						CPU_IDLE : CPU_NOT_IDLE;

	rebalance_domains(this_cpu, idle);

	/*
5359
	 * If this cpu has a pending nohz_balance_kick, then do the
5360 5361 5362
	 * balancing on behalf of the other idle cpus whose ticks are
	 * stopped.
	 */
5363
	nohz_idle_balance(this_cpu, idle);
5364 5365 5366 5367
}

static inline int on_null_domain(int cpu)
{
5368
	return !rcu_dereference_sched(cpu_rq(cpu)->sd);
5369 5370 5371 5372 5373
}

/*
 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
 */
5374
void trigger_load_balance(struct rq *rq, int cpu)
5375 5376 5377 5378 5379
{
	/* Don't need to rebalance while attached to NULL domain */
	if (time_after_eq(jiffies, rq->next_balance) &&
	    likely(!on_null_domain(cpu)))
		raise_softirq(SCHED_SOFTIRQ);
5380
#ifdef CONFIG_NO_HZ
5381
	if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
5382 5383
		nohz_balancer_kick(cpu);
#endif
5384 5385
}

5386 5387 5388 5389 5390 5391 5392 5393
static void rq_online_fair(struct rq *rq)
{
	update_sysctl();
}

static void rq_offline_fair(struct rq *rq)
{
	update_sysctl();
5394 5395 5396

	/* Ensure any throttled groups are reachable by pick_next_task */
	unthrottle_offline_cfs_rqs(rq);
5397 5398
}

5399
#endif /* CONFIG_SMP */
5400

5401 5402 5403
/*
 * scheduler tick hitting a task of our scheduling class:
 */
P
Peter Zijlstra 已提交
5404
static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
5405 5406 5407 5408 5409 5410
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
P
Peter Zijlstra 已提交
5411
		entity_tick(cfs_rq, se, queued);
5412
	}
5413 5414

	update_rq_runnable_avg(rq, 1);
5415 5416 5417
}

/*
P
Peter Zijlstra 已提交
5418 5419 5420
 * called on fork with the child task as argument from the parent's context
 *  - child not yet on the tasklist
 *  - preemption disabled
5421
 */
P
Peter Zijlstra 已提交
5422
static void task_fork_fair(struct task_struct *p)
5423
{
5424 5425
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se, *curr;
5426
	int this_cpu = smp_processor_id();
P
Peter Zijlstra 已提交
5427 5428 5429
	struct rq *rq = this_rq();
	unsigned long flags;

5430
	raw_spin_lock_irqsave(&rq->lock, flags);
5431

5432 5433
	update_rq_clock(rq);

5434 5435 5436
	cfs_rq = task_cfs_rq(current);
	curr = cfs_rq->curr;

5437 5438
	if (unlikely(task_cpu(p) != this_cpu)) {
		rcu_read_lock();
		__set_task_cpu(p, this_cpu);
		rcu_read_unlock();
	}

	update_curr(cfs_rq);

	if (curr)
		se->vruntime = curr->vruntime;
	place_entity(cfs_rq, se, 1);

	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
		/*
		 * Upon rescheduling, sched_class::put_prev_task() will place
		 * 'current' within the tree based on its new key value.
		 */
		swap(curr->vruntime, se->vruntime);
		resched_task(rq->curr);
	}

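	/*
	 * Record the child's vruntime relative to min_vruntime: the child
	 * may be enqueued on a different CPU by wake_up_new_task(), where
	 * enqueue_entity() will add that runqueue's min_vruntime back in.
	 */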
	se->vruntime -= cfs_rq->min_vruntime;

	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

/*
 * Priority of the task has changed. Check to see if we preempt
 * the current task.
 */
static void
prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
{
	if (!p->se.on_rq)
		return;

	/*
	 * Reschedule if we are currently running on this runqueue and
	 * our priority decreased, or if we are not currently running on
	 * this runqueue and our priority is higher than the current's
	 */
	if (rq->curr == p) {
		if (p->prio > oldprio)
			resched_task(rq->curr);
	} else
		check_preempt_curr(rq, p, 0);
}

static void switched_from_fair(struct rq *rq, struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	/*
	 * Ensure the task's vruntime is normalized, so that when it is
	 * switched back to the fair class the enqueue_entity(.flags=0) will
	 * do the right thing.
	 *
	 * If it was on_rq, then the dequeue_entity(.flags=0) will already
	 * have normalized the vruntime, if it was !on_rq, then only when
	 * the task is sleeping will it still have non-normalized vruntime.
	 */
	if (!se->on_rq && p->state != TASK_RUNNING) {
		/*
		 * Fix up our vruntime so that the current sleep doesn't
		 * cause 'unlimited' sleep bonus.
		 */
		place_entity(cfs_rq, se, 0);
		se->vruntime -= cfs_rq->min_vruntime;
	}

#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
	/*
	 * Remove our load from contribution when we leave sched_fair
	 * and ensure we don't carry in an old decay_count if we
	 * switch back.
	 */
	if (p->se.avg.decay_count) {
		struct cfs_rq *cfs_rq = cfs_rq_of(&p->se);
		__synchronize_entity_decay(&p->se);
		subtract_blocked_load_contrib(cfs_rq,
				p->se.avg.load_avg_contrib);
	}
#endif
}

/*
 * We switched to the sched_fair class.
 */
static void switched_to_fair(struct rq *rq, struct task_struct *p)
{
	if (!p->se.on_rq)
		return;

	/*
	 * We were most likely switched from sched_rt, so
	 * kick off the schedule if running, otherwise just see
	 * if we can still preempt the current task.
	 */
	if (rq->curr == p)
		resched_task(rq->curr);
	else
		check_preempt_curr(rq, p, 0);
}

/* Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
	struct sched_entity *se = &rq->curr->se;

	for_each_sched_entity(se) {
		struct cfs_rq *cfs_rq = cfs_rq_of(se);

		set_next_entity(cfs_rq, se);
		/* ensure bandwidth has been allocated on our new cfs_rq */
		account_cfs_rq_runtime(cfs_rq, 0);
	}
}

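/*
 * Set up an empty cfs_rq: the rbtree starts empty and min_vruntime is
 * biased to just below the u64 wrap point so the wrap-around handling
 * in the vruntime math is exercised early.
 */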
void init_cfs_rq(struct cfs_rq *cfs_rq)
{
	cfs_rq->tasks_timeline = RB_ROOT;
	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
#ifndef CONFIG_64BIT
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
	atomic64_set(&cfs_rq->decay_counter, 1);
	atomic64_set(&cfs_rq->removed_load, 0);
#endif
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static void task_move_group_fair(struct task_struct *p, int on_rq)
{
	struct cfs_rq *cfs_rq;
	/*
	 * If the task was not on the rq at the time of this cgroup movement
	 * it must have been asleep, sleeping tasks keep their ->vruntime
	 * absolute on their old rq until wakeup (needed for the fair sleeper
	 * bonus in place_entity()).
	 *
	 * If it was on the rq, we've just 'preempted' it, which does convert
	 * ->vruntime to a relative base.
	 *
	 * Make sure both cases convert their relative position when migrating
	 * to another cgroup's rq. This does somewhat interfere with the
	 * fair sleeper stuff for the first placement, but who cares.
	 */
	/*
	 * When !on_rq, vruntime of the task has usually NOT been normalized.
	 * But there are some cases where it has already been normalized:
	 *
	 * - Moving a forked child which is waiting for being woken up by
	 *   wake_up_new_task().
	 * - Moving a task which has been woken up by try_to_wake_up() and
	 *   waiting for actually being woken up by sched_ttwu_pending().
	 *
	 * To prevent boost or penalty in the new cfs_rq caused by delta
	 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
	 */
	if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
		on_rq = 1;

	if (!on_rq)
		p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
	set_task_rq(p, task_cpu(p));
	if (!on_rq) {
		cfs_rq = cfs_rq_of(&p->se);
		p->se.vruntime += cfs_rq->min_vruntime;
#ifdef CONFIG_SMP
		/*
		 * migrate_task_rq_fair() will have removed our previous
		 * contribution, but we must synchronize for ongoing future
		 * decay.
		 */
		p->se.avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
		cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib;
#endif
	}
}

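/*
 * Release a task group's CFS state: its bandwidth pool and the per-CPU
 * cfs_rq/sched_entity pairs set up by alloc_fair_sched_group().
 */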
void free_fair_sched_group(struct task_group *tg)
{
	int i;

	destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));

	for_each_possible_cpu(i) {
		if (tg->cfs_rq)
			kfree(tg->cfs_rq[i]);
		if (tg->se)
			kfree(tg->se[i]);
	}

	kfree(tg->cfs_rq);
	kfree(tg->se);
}

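/*
 * Allocate per-CPU cfs_rq and sched_entity structures for a new task
 * group and hook each entity under the parent group's runqueue.
 * Returns 1 on success and 0 on allocation failure.
 */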
int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se;
	int i;

	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->cfs_rq)
		goto err;
	tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->se)
		goto err;

	tg->shares = NICE_0_LOAD;

	init_cfs_bandwidth(tg_cfs_bandwidth(tg));

	for_each_possible_cpu(i) {
		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
				      GFP_KERNEL, cpu_to_node(i));
		if (!cfs_rq)
			goto err;

		se = kzalloc_node(sizeof(struct sched_entity),
				  GFP_KERNEL, cpu_to_node(i));
		if (!se)
			goto err_free_rq;

		init_cfs_rq(cfs_rq);
		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
	}

	return 1;

err_free_rq:
	kfree(cfs_rq);
err:
	return 0;
}

void unregister_fair_sched_group(struct task_group *tg, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	/*
	 * Only empty task groups can be destroyed; so we can speculatively
	 * check on_list without danger of it being re-added.
	 */
	if (!tg->cfs_rq[cpu]->on_list)
		return;

	raw_spin_lock_irqsave(&rq->lock, flags);
	list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

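/*
 * Wire up one CPU's cfs_rq and group entity: link the entity under the
 * parent's runqueue (or the root cfs_rq for a top-level group) and
 * start it with zero weight until the group shares are propagated.
 */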
void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	cfs_rq->tg = tg;
	cfs_rq->rq = rq;
#ifdef CONFIG_SMP
	/* allow initial update_cfs_load() to truncate */
	cfs_rq->load_stamp = 1;
#endif
	init_cfs_rq_runtime(cfs_rq);

	tg->cfs_rq[cpu] = cfs_rq;
	tg->se[cpu] = se;

	/* se could be NULL for root_task_group */
	if (!se)
		return;

	if (!parent)
		se->cfs_rq = &rq->cfs;
	else
		se->cfs_rq = parent->my_q;

	se->my_q = cfs_rq;
	update_load_set(&se->load, 0);
	se->parent = parent;
}

static DEFINE_MUTEX(shares_mutex);

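/*
 * Update a group's shares: clamp to [MIN_SHARES, MAX_SHARES] and, under
 * each runqueue's lock, propagate the new weight up the entity
 * hierarchy; shares_mutex serialises concurrent writers.
 */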
int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
	int i;
	unsigned long flags;

	/*
	 * We can't change the weight of the root cgroup.
	 */
	if (!tg->se[0])
		return -EINVAL;

	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));

	mutex_lock(&shares_mutex);
	if (tg->shares == shares)
		goto done;

	tg->shares = shares;
	for_each_possible_cpu(i) {
		struct rq *rq = cpu_rq(i);
		struct sched_entity *se;

		se = tg->se[i];
		/* Propagate contribution to hierarchy */
		raw_spin_lock_irqsave(&rq->lock, flags);
		for_each_sched_entity(se)
			update_cfs_shares(group_cfs_rq(se));
		raw_spin_unlock_irqrestore(&rq->lock, flags);
	}

done:
	mutex_unlock(&shares_mutex);
	return 0;
}
#else /* CONFIG_FAIR_GROUP_SCHED */

void free_fair_sched_group(struct task_group *tg) { }

int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}

void unregister_fair_sched_group(struct task_group *tg, int cpu) { }

#endif /* CONFIG_FAIR_GROUP_SCHED */

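/*
 * Report the timeslice a fair task would currently get; this backs
 * sched_rr_get_interval() for SCHED_OTHER/SCHED_BATCH tasks.
 */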
static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
5779 5780 5781 5782 5783 5784 5785 5786 5787 5788 5789 5790 5791 5792
{
	struct sched_entity *se = &task->se;
	unsigned int rr_interval = 0;

	/*
	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
	 * idle runqueue:
	 */
	if (rq->cfs.load.weight)
		rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));

	return rr_interval;
}

/*
 * All the scheduling class methods:
 */
const struct sched_class fair_sched_class = {
	.next			= &idle_sched_class,
	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,
	.yield_to_task		= yield_to_task_fair,

	.check_preempt_curr	= check_preempt_wakeup,

	.pick_next_task		= pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_fair,
	.migrate_task_rq	= migrate_task_rq_fair,

	.rq_online		= rq_online_fair,
	.rq_offline		= rq_offline_fair,

	.task_waking		= task_waking_fair,
#endif

	.set_curr_task          = set_curr_task_fair,
	.task_tick		= task_tick_fair,
	.task_fork		= task_fork_fair,

	.prio_changed		= prio_changed_fair,
	.switched_from		= switched_from_fair,
	.switched_to		= switched_to_fair,

	.get_rr_interval	= get_rr_interval_fair,

#ifdef CONFIG_FAIR_GROUP_SCHED
	.task_move_group	= task_move_group_fair,
#endif
};

#ifdef CONFIG_SCHED_DEBUG
void print_cfs_stats(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

	rcu_read_lock();
	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
		print_cfs_rq(m, cpu, cfs_rq);
	rcu_read_unlock();
}
#endif

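/*
 * Register the SCHED_SOFTIRQ handler that runs the load balancer and,
 * under NO_HZ, set up the idle-CPU mask and hotplug notifier used to
 * nominate an idle load balancer for tickless CPUs.
 */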
__init void init_sched_fair_class(void)
{
#ifdef CONFIG_SMP
	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);

#ifdef CONFIG_NO_HZ
	nohz.next_balance = jiffies;
	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
	cpu_notifier(sched_ilb_notifier, 0);
#endif
#endif /* SMP */

}