/*
 *  kernel/sched/core.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *		make semaphores SMP safe
 *  1998-11-19	Implemented schedule_timeout() and related stuff
 *		by Andrea Arcangeli
 *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
 *		hybrid priority-list and round-robin design with
 *		an array-switch method of distributing timeslices
 *		and per-CPU runqueues.  Cleanups and useful suggestions
 *		by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03	Interactivity tuning by Con Kolivas.
 *  2004-04-02	Scheduler domains code by Nick Piggin
 *  2007-04-15  Work begun on replacing all interactivity tuning with a
 *              fair scheduling design by Con Kolivas.
 *  2007-05-05  Load balancing (smp-nice) and other improvements
 *              by Peter Williams
 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
 *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *              Thomas Gleixner, Mike Kravetz
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/init_task.h>
#include <linux/binfmts.h>
#include <linux/context_tracking.h>
#include <linux/compiler.h>

#include <asm/switch_to.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#include "sched.h"
#include "../workqueue_internal.h"
#include "../smpboot.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
{
	unsigned long delta;
	ktime_t soft, hard, now;

	for (;;) {
		if (hrtimer_active(period_timer))
			break;

		now = hrtimer_cb_get_time(period_timer);
		hrtimer_forward(period_timer, now, period);

		soft = hrtimer_get_softexpires(period_timer);
		hard = hrtimer_get_expires(period_timer);
		delta = ktime_to_ns(ktime_sub(hard, soft));
		__hrtimer_start_range_ns(period_timer, soft, delta,
					 HRTIMER_MODE_ABS_PINNED, 0);
	}
}

DEFINE_MUTEX(sched_domains_mutex);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

static void update_rq_clock_task(struct rq *rq, s64 delta);

void update_rq_clock(struct rq *rq)
{
	s64 delta;

	if (rq->skip_clock_update > 0)
		return;

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}

/*
 * Debugging: various feature bits
 */

#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |

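/*
 * Each SCHED_FEAT(name, enabled) entry in features.h expands, via the
 * macro above, to "(1UL << __SCHED_FEAT_##name) * enabled |", so the
 * #include below ORs together the bits of every feature that defaults
 * to enabled and forms the initial sysctl_sched_features mask.
 */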
const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;

#undef SCHED_FEAT

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef HAVE_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	if (static_key_enabled(&sched_feat_keys[i]))
		static_key_slow_dec(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	if (!static_key_enabled(&sched_feat_keys[i]))
		static_key_slow_inc(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* HAVE_JUMP_LABEL */
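/*
 * When jump labels are available, hot-path sched_feat() tests are expected
 * to be driven by the static keys above rather than by reading
 * sysctl_sched_features; sched_feat_enable()/sched_feat_disable() keep the
 * keys in sync whenever a feature bit is flipped at runtime.
 */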

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (strcmp(cmp, sched_feat_names[i]) == 0) {
			if (neg) {
				sysctl_sched_features &= ~(1UL << i);
				sched_feat_disable(i);
			} else {
				sysctl_sched_features |= (1UL << i);
				sched_feat_enable(i);
			}
			break;
		}
	}

	return i;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int i;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	i = sched_feat_set(cmp);
	if (i == __SCHED_FEAT_NR)
		return -EINVAL;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	return 0;
}
late_initcall(sched_init_debug);
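/*
 * The file created above can be used to inspect and toggle feature bits at
 * runtime, for example (assuming debugfs is mounted in the usual place):
 *
 *	cat /sys/kernel/debug/sched_features
 *	echo NO_TTWU_QUEUE > /sys/kernel/debug/sched_features
 */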
#endif /* CONFIG_SCHED_DEBUG */

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;

/*
 * period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

__read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;

/*
 * __task_rq_lock - lock the rq @p resides on.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p)))
			return rq;
		raw_spin_unlock(&rq->lock);
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, *flags);
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p)))
			return rq;
		raw_spin_unlock(&rq->lock);
		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
	}
}

static void __task_rq_unlock(struct rq *rq)
	__releases(rq->lock)
{
	raw_spin_unlock(&rq->lock);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}

/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static struct rq *this_rq_lock(void)
	__acquires(rq->lock)
{
	struct rq *rq;

	local_irq_disable();
	rq = this_rq();
	raw_spin_lock(&rq->lock);

	return rq;
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	raw_spin_lock(&rq->lock);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	raw_spin_unlock(&rq->lock);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP

static int __hrtick_restart(struct rq *rq)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = hrtimer_get_softexpires(timer);

	return __hrtimer_start_range_ns(timer, time, 0, HRTIMER_MODE_ABS_PINNED, 0);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;

	raw_spin_lock(&rq->lock);
	__hrtick_restart(rq);
	rq->hrtick_csd_pending = 0;
	raw_spin_unlock(&rq->lock);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = ktime_add_ns(timer->base->get_time(), delay);

	hrtimer_set_expires(timer, time);

	if (rq == this_rq()) {
		__hrtick_restart(rq);
	} else if (!rq->hrtick_csd_pending) {
		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
		rq->hrtick_csd_pending = 1;
	}
}

static int
hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int cpu = (int)(long)hcpu;

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		hrtick_clear(cpu_rq(cpu));
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static __init void init_hrtick(void)
{
	hotcpu_notifier(hotplug_hrtick, 0);
}
#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
			HRTIMER_MODE_REL_PINNED, 0);
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SMP */

static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP
	rq->hrtick_csd_pending = 0;

	rq->hrtick_csd.flags = 0;
	rq->hrtick_csd.func = __hrtick_start;
	rq->hrtick_csd.info = rq;
#endif

	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rq->hrtick_timer.function = hrtick;
}
#else	/* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void init_rq_hrtick(struct rq *rq)
{
}

static inline void init_hrtick(void)
{
}
#endif	/* CONFIG_SCHED_HRTICK */

/*
 * resched_task - mark a task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
void resched_task(struct task_struct *p)
{
	int cpu;

	lockdep_assert_held(&task_rq(p)->lock);

	if (test_tsk_need_resched(p))
		return;

	set_tsk_need_resched(p);

	cpu = task_cpu(p);
	if (cpu == smp_processor_id()) {
		set_preempt_need_resched();
		return;
	}

	/* NEED_RESCHED must be visible before we test polling */
	smp_mb();
	if (!tsk_is_polling(p))
		smp_send_reschedule(cpu);
}

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
		return;
	resched_task(cpu_curr(cpu));
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
551 552 553 554 555 556 557 558
/*
 * In the semi idle case, use the nearest busy cpu for migrating timers
 * from an idle cpu.  This is good for power-savings.
 *
 * We don't do similar optimization for completely idle system, as
 * selecting an idle cpu will add more delays to the timers than intended
 * (as that cpu's timer base may not be uptodate wrt jiffies etc).
 */
559
int get_nohz_timer_target(int pinned)
560 561 562 563 564
{
	int cpu = smp_processor_id();
	int i;
	struct sched_domain *sd;

565 566 567
	if (pinned || !get_sysctl_timer_migration() || !idle_cpu(cpu))
		return cpu;

568
	rcu_read_lock();
569
	for_each_domain(cpu, sd) {
570 571 572 573 574 575
		for_each_cpu(i, sched_domain_span(sd)) {
			if (!idle_cpu(i)) {
				cpu = i;
				goto unlock;
			}
		}
576
	}
577 578
unlock:
	rcu_read_unlock();
579 580
	return cpu;
}
581 582 583 584 585 586 587 588 589 590
/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
591
static void wake_up_idle_cpu(int cpu)
592 593 594 595 596 597 598 599 600 601 602 603 604 605 606
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	/*
	 * This is safe, as this function is called with the timer
	 * wheel base lock of (cpu) held. When the CPU is on the way
	 * to idle and has not yet set rq->curr to idle then it will
	 * be serialized on the timer wheel base lock and take the new
	 * timer into account automatically.
	 */
	if (rq->curr != rq->idle)
		return;
607 608

	/*
609 610 611
	 * We can set TIF_RESCHED on the idle task of the other CPU
	 * lockless. The worst case is that the other CPU runs the
	 * idle task through an additional NOOP schedule()
612
	 */
613
	set_tsk_need_resched(rq->idle);
614

615 616 617 618
	/* NEED_RESCHED must be visible before we test polling */
	smp_mb();
	if (!tsk_is_polling(rq->idle))
		smp_send_reschedule(cpu);
619 620
}

621
static bool wake_up_full_nohz_cpu(int cpu)
622
{
623
	if (tick_nohz_full_cpu(cpu)) {
624 625 626 627 628 629 630 631 632 633 634
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			smp_send_reschedule(cpu);
		return true;
	}

	return false;
}

void wake_up_nohz_cpu(int cpu)
{
635
	if (!wake_up_full_nohz_cpu(cpu))
636 637 638
		wake_up_idle_cpu(cpu);
}

639
static inline bool got_nohz_idle_kick(void)
640
{
641
	int cpu = smp_processor_id();
642 643 644 645 646 647 648 649 650 651 652 653 654

	if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
		return false;

	if (idle_cpu(cpu) && !need_resched())
		return true;

	/*
	 * We can't run Idle Load Balance on this CPU for this time so we
	 * cancel it and clear NOHZ_BALANCE_KICK
	 */
	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
	return false;
655 656
}

657
#else /* CONFIG_NO_HZ_COMMON */
658

659
static inline bool got_nohz_idle_kick(void)
P
Peter Zijlstra 已提交
660
{
661
	return false;
P
Peter Zijlstra 已提交
662 663
}

664
#endif /* CONFIG_NO_HZ_COMMON */
665

666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682
#ifdef CONFIG_NO_HZ_FULL
bool sched_can_stop_tick(void)
{
       struct rq *rq;

       rq = this_rq();

       /* Make sure rq->nr_running update is visible after the IPI */
       smp_rmb();

       /* More than one running task needs preemption */
       if (rq->nr_running > 1)
               return false;

       return true;
}
#endif /* CONFIG_NO_HZ_FULL */
683

684
void sched_avg_update(struct rq *rq)
685
{
686 687
	s64 period = sched_avg_period();

688
	while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
689 690 691 692 693 694
		/*
		 * Inline assembly required to prevent the compiler
		 * optimising this loop into a divmod call.
		 * See __iter_div_u64_rem() for another example of this.
		 */
		asm("" : "+rm" (rq->age_stamp));
695 696 697
		rq->age_stamp += period;
		rq->rt_avg /= 2;
	}
698 699
}

700
#endif /* CONFIG_SMP */
701

702 703
#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
704
/*
705 706 707 708
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
709
 */
710
int walk_tg_tree_from(struct task_group *from,
711
			     tg_visitor down, tg_visitor up, void *data)
712 713
{
	struct task_group *parent, *child;
P
Peter Zijlstra 已提交
714
	int ret;
715

716 717
	parent = from;

718
down:
P
Peter Zijlstra 已提交
719 720
	ret = (*down)(parent, data);
	if (ret)
721
		goto out;
722 723 724 725 726 727 728
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
P
Peter Zijlstra 已提交
729
	ret = (*up)(parent, data);
730 731
	if (ret || parent == from)
		goto out;
732 733 734 735 736

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
737
out:
P
Peter Zijlstra 已提交
738
	return ret;
739 740
}

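/*
 * tg_nop() - no-op visitor for walk_tg_tree_from(), for callers that only
 * care about one direction of the walk.
 */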
int tg_nop(struct task_group *tg, void *data)
P
Peter Zijlstra 已提交
742
{
743
	return 0;
P
Peter Zijlstra 已提交
744
}
745 746
#endif

747 748
static void set_load_weight(struct task_struct *p)
{
N
Nikhil Rao 已提交
749 750 751
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

I
Ingo Molnar 已提交
752 753 754 755
	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	if (p->policy == SCHED_IDLE) {
756
		load->weight = scale_load(WEIGHT_IDLEPRIO);
N
Nikhil Rao 已提交
757
		load->inv_weight = WMULT_IDLEPRIO;
I
Ingo Molnar 已提交
758 759
		return;
	}
760

761
	load->weight = scale_load(prio_to_weight[prio]);
N
Nikhil Rao 已提交
762
	load->inv_weight = prio_to_wmult[prio];
763 764
}

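/*
 * enqueue_task()/dequeue_task() refresh the rq clock, record sched_info
 * statistics and then let the task's scheduling class add the task to or
 * remove it from its runqueue data structures.
 */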
static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
766
{
767
	update_rq_clock(rq);
768
	sched_info_queued(rq, p);
769
	p->sched_class->enqueue_task(rq, p, flags);
770 771
}

772
static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
773
{
774
	update_rq_clock(rq);
775
	sched_info_dequeued(rq, p);
776
	p->sched_class->dequeue_task(rq, p, flags);
777 778
}

779
void activate_task(struct rq *rq, struct task_struct *p, int flags)
780 781 782 783
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible--;

784
	enqueue_task(rq, p, flags);
785 786
}

787
void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
788 789 790 791
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible++;

792
	dequeue_task(rq, p, flags);
793 794
}

795
static void update_rq_clock_task(struct rq *rq, s64 delta)
796
{
797 798 799 800 801 802 803 804
/*
 * In theory, the compiler should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	s64 steal = 0, irq_delta = 0;
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
805
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}irq region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight miss-attribution of {soft,}irq
	 * time, a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
827 828
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
829
	if (static_key_false((&paravirt_steal_rq_enabled))) {
830 831 832 833 834 835 836 837 838 839 840
		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		rq->prev_steal_time_rq += steal;
		delta -= steal;
	}
#endif

841 842
	rq->clock_task += delta;

843 844 845 846
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
		sched_rt_avg_update(rq, irq_delta + steal);
#endif
847 848
}

849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878
void sched_set_stop_task(int cpu, struct task_struct *stop)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct task_struct *old_stop = cpu_rq(cpu)->stop;

	if (stop) {
		/*
		 * Make it appear like a SCHED_FIFO task, it's something
		 * userspace knows about and won't get confused about.
		 *
		 * Also, it will make PI more or less work without too
		 * much confusion -- but then, stop work should not
		 * rely on PI working anyway.
		 */
		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);

		stop->sched_class = &stop_sched_class;
	}

	cpu_rq(cpu)->stop = stop;

	if (old_stop) {
		/*
		 * Reset it back to a normal scheduling class so that
		 * it can die in pieces.
		 */
		old_stop->sched_class = &rt_sched_class;
	}
}

879
/*
I
Ingo Molnar 已提交
880
 * __normal_prio - return the priority that is based on the static prio
881 882 883
 */
static inline int __normal_prio(struct task_struct *p)
{
I
Ingo Molnar 已提交
884
	return p->static_prio;
885 886
}

887 888 889 890 891 892 893
/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
894
static inline int normal_prio(struct task_struct *p)
895 896 897
{
	int prio;

898 899 900
	if (task_has_dl_policy(p))
		prio = MAX_DL_PRIO-1;
	else if (task_has_rt_policy(p))
901 902 903 904 905 906 907 908 909 910 911 912 913
		prio = MAX_RT_PRIO-1 - p->rt_priority;
	else
		prio = __normal_prio(p);
	return prio;
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
914
static int effective_prio(struct task_struct *p)
915 916 917 918 919 920 921 922 923 924 925 926
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}

L
Linus Torvalds 已提交
927 928 929
/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
930 931
 *
 * Return: 1 if the task is currently executing. 0 otherwise.
L
Linus Torvalds 已提交
932
 */
933
inline int task_curr(const struct task_struct *p)
L
Linus Torvalds 已提交
934 935 936 937
{
	return cpu_curr(task_cpu(p)) == p;
}

938 939
static inline void check_class_changed(struct rq *rq, struct task_struct *p,
				       const struct sched_class *prev_class,
P
Peter Zijlstra 已提交
940
				       int oldprio)
941 942 943
{
	if (prev_class != p->sched_class) {
		if (prev_class->switched_from)
P
Peter Zijlstra 已提交
944 945
			prev_class->switched_from(rq, p);
		p->sched_class->switched_to(rq, p);
946
	} else if (oldprio != p->prio || dl_task(p))
P
Peter Zijlstra 已提交
947
		p->sched_class->prio_changed(rq, p, oldprio);
948 949
}

950
void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970
{
	const struct sched_class *class;

	if (p->sched_class == rq->curr->sched_class) {
		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
	} else {
		for_each_class(class) {
			if (class == rq->curr->sched_class)
				break;
			if (class == p->sched_class) {
				resched_task(rq->curr);
				break;
			}
		}
	}

	/*
	 * A queue event has occurred, and we're going to schedule.  In
	 * this case, we can save a useless back to back clock update.
	 */
P
Peter Zijlstra 已提交
971
	if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
972 973 974
		rq->skip_clock_update = 1;
}

L
Linus Torvalds 已提交
975
#ifdef CONFIG_SMP
I
Ingo Molnar 已提交
976
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
I
Ingo Molnar 已提交
977
{
978 979 980 981 982
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * We should never call set_task_cpu() on a blocked task,
	 * ttwu() will sort out the placement.
	 */
P
Peter Zijlstra 已提交
983
	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
984
			!(task_preempt_count(p) & PREEMPT_ACTIVE));
985 986

#ifdef CONFIG_LOCKDEP
987 988 989 990 991
	/*
	 * The caller should hold either p->pi_lock or rq->lock, when changing
	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
	 *
	 * sched_move_task() holds both and thus holding either pins the cgroup,
P
Peter Zijlstra 已提交
992
	 * see task_group().
993 994 995 996
	 *
	 * Furthermore, all task_rq users should acquire both locks, see
	 * task_rq_lock().
	 */
997 998 999
	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
				      lockdep_is_held(&task_rq(p)->lock)));
#endif
1000 1001
#endif

1002
	trace_sched_migrate_task(p, new_cpu);
1003

1004
	if (task_cpu(p) != new_cpu) {
1005 1006
		if (p->sched_class->migrate_task_rq)
			p->sched_class->migrate_task_rq(p, new_cpu);
1007
		p->se.nr_migrations++;
1008
		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
1009
	}
I
Ingo Molnar 已提交
1010 1011

	__set_task_cpu(p, new_cpu);
I
Ingo Molnar 已提交
1012 1013
}

1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049
static void __migrate_swap_task(struct task_struct *p, int cpu)
{
	if (p->on_rq) {
		struct rq *src_rq, *dst_rq;

		src_rq = task_rq(p);
		dst_rq = cpu_rq(cpu);

		deactivate_task(src_rq, p, 0);
		set_task_cpu(p, cpu);
		activate_task(dst_rq, p, 0);
		check_preempt_curr(dst_rq, p, 0);
	} else {
		/*
		 * Task isn't running anymore; make it appear like we migrated
		 * it before it went to sleep. This means on wakeup we make the
		 * previous cpu our target instead of where it really is.
		 */
		p->wake_cpu = cpu;
	}
}

struct migration_swap_arg {
	struct task_struct *src_task, *dst_task;
	int src_cpu, dst_cpu;
};

static int migrate_swap_stop(void *data)
{
	struct migration_swap_arg *arg = data;
	struct rq *src_rq, *dst_rq;
	int ret = -EAGAIN;

	src_rq = cpu_rq(arg->src_cpu);
	dst_rq = cpu_rq(arg->dst_cpu);

1050 1051
	double_raw_lock(&arg->src_task->pi_lock,
			&arg->dst_task->pi_lock);
1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071
	double_rq_lock(src_rq, dst_rq);
	if (task_cpu(arg->dst_task) != arg->dst_cpu)
		goto unlock;

	if (task_cpu(arg->src_task) != arg->src_cpu)
		goto unlock;

	if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task)))
		goto unlock;

	if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task)))
		goto unlock;

	__migrate_swap_task(arg->src_task, arg->dst_cpu);
	__migrate_swap_task(arg->dst_task, arg->src_cpu);

	ret = 0;

unlock:
	double_rq_unlock(src_rq, dst_rq);
1072 1073
	raw_spin_unlock(&arg->dst_task->pi_lock);
	raw_spin_unlock(&arg->src_task->pi_lock);
1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095

	return ret;
}

/*
 * Cross migrate two tasks
 */
int migrate_swap(struct task_struct *cur, struct task_struct *p)
{
	struct migration_swap_arg arg;
	int ret = -EINVAL;

	arg = (struct migration_swap_arg){
		.src_task = cur,
		.src_cpu = task_cpu(cur),
		.dst_task = p,
		.dst_cpu = task_cpu(p),
	};

	if (arg.src_cpu == arg.dst_cpu)
		goto out;

1096 1097 1098 1099
	/*
	 * These three tests are all lockless; this is OK since all of them
	 * will be re-checked with proper locks held further down the line.
	 */
1100 1101 1102 1103 1104 1105 1106 1107 1108
	if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
		goto out;

	if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task)))
		goto out;

	if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task)))
		goto out;

1109
	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
1110 1111 1112 1113 1114 1115
	ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);

out:
	return ret;
}

1116
struct migration_arg {
1117
	struct task_struct *task;
L
Linus Torvalds 已提交
1118
	int dest_cpu;
1119
};
L
Linus Torvalds 已提交
1120

1121 1122
static int migration_cpu_stop(void *data);

L
Linus Torvalds 已提交
1123 1124 1125
/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
R
Roland McGrath 已提交
1126 1127 1128 1129 1130 1131 1132
 * If @match_state is nonzero, it's the @p->state value just checked and
 * not expected to change.  If it changes, i.e. @p might have woken up,
 * then return zero.  When we succeed in waiting for @p to be off its CPU,
 * we return a positive number (its total switch count).  If a second call
 * a short while later returns the same number, the caller can be sure that
 * @p has remained unscheduled the whole time.
 *
L
Linus Torvalds 已提交
1133 1134 1135 1136 1137 1138
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
R
Roland McGrath 已提交
1139
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
L
Linus Torvalds 已提交
1140 1141
{
	unsigned long flags;
I
Ingo Molnar 已提交
1142
	int running, on_rq;
R
Roland McGrath 已提交
1143
	unsigned long ncsw;
1144
	struct rq *rq;
L
Linus Torvalds 已提交
1145

1146 1147 1148 1149 1150 1151 1152 1153
	for (;;) {
		/*
		 * We do the initial early heuristics without holding
		 * any task-queue locks at all. We'll only try to get
		 * the runqueue lock when things look like they will
		 * work out!
		 */
		rq = task_rq(p);
1154

1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165
		/*
		 * If the task is actively running on another CPU
		 * still, just relax and busy-wait without holding
		 * any locks.
		 *
		 * NOTE! Since we don't hold any locks, it's not
		 * even sure that "rq" stays as the right runqueue!
		 * But we don't care, since "task_running()" will
		 * return false if the runqueue has changed and p
		 * is actually now running somewhere else!
		 */
R
Roland McGrath 已提交
1166 1167 1168
		while (task_running(rq, p)) {
			if (match_state && unlikely(p->state != match_state))
				return 0;
1169
			cpu_relax();
R
Roland McGrath 已提交
1170
		}
1171

1172 1173 1174 1175 1176 1177
		/*
		 * Ok, time to look more closely! We need the rq
		 * lock now, to be *sure*. If we're wrong, we'll
		 * just go back and repeat.
		 */
		rq = task_rq_lock(p, &flags);
1178
		trace_sched_wait_task(p);
1179
		running = task_running(rq, p);
P
Peter Zijlstra 已提交
1180
		on_rq = p->on_rq;
R
Roland McGrath 已提交
1181
		ncsw = 0;
1182
		if (!match_state || p->state == match_state)
1183
			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
1184
		task_rq_unlock(rq, p, &flags);
1185

R
Roland McGrath 已提交
1186 1187 1188 1189 1190 1191
		/*
		 * If it changed from the expected state, bail out now.
		 */
		if (unlikely(!ncsw))
			break;

1192 1193 1194 1195 1196 1197 1198 1199 1200 1201
		/*
		 * Was it really running after all now that we
		 * checked with the proper locks actually held?
		 *
		 * Oops. Go back and try again..
		 */
		if (unlikely(running)) {
			cpu_relax();
			continue;
		}
1202

1203 1204 1205 1206 1207
		/*
		 * It's not enough that it's not actively running,
		 * it must be off the runqueue _entirely_, and not
		 * preempted!
		 *
1208
		 * So if it was still runnable (but just not actively
1209 1210 1211 1212
		 * running right now), it's preempted, and we should
		 * yield - it could be a while.
		 */
		if (unlikely(on_rq)) {
1213 1214 1215 1216
			ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);

			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
1217 1218
			continue;
		}
1219

1220 1221 1222 1223 1224 1225 1226
		/*
		 * Ahh, all good. It wasn't running, and it wasn't
		 * runnable, which means that it will never become
		 * running in the future either. We're all done!
		 */
		break;
	}
R
Roland McGrath 已提交
1227 1228

	return ncsw;
L
Linus Torvalds 已提交
1229 1230 1231 1232 1233 1234 1235 1236 1237
}

/***
 * kick_process - kick a running thread to enter/exit the kernel
 * @p: the to-be-kicked thread
 *
 * Cause a process which is running on another CPU to enter
 * kernel-mode, without any delay. (to get signals handled.)
 *
L
Lucas De Marchi 已提交
1238
 * NOTE: this function doesn't have to take the runqueue lock,
L
Linus Torvalds 已提交
1239 1240 1241 1242 1243
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
1244
void kick_process(struct task_struct *p)
L
Linus Torvalds 已提交
1245 1246 1247 1248 1249 1250 1251 1252 1253
{
	int cpu;

	preempt_disable();
	cpu = task_cpu(p);
	if ((cpu != smp_processor_id()) && task_curr(p))
		smp_send_reschedule(cpu);
	preempt_enable();
}
R
Rusty Russell 已提交
1254
EXPORT_SYMBOL_GPL(kick_process);
N
Nick Piggin 已提交
1255
#endif /* CONFIG_SMP */
L
Linus Torvalds 已提交
1256

1257
#ifdef CONFIG_SMP
1258
/*
1259
 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
1260
 */
1261 1262
static int select_fallback_rq(int cpu, struct task_struct *p)
{
1263 1264
	int nid = cpu_to_node(cpu);
	const struct cpumask *nodemask = NULL;
1265 1266
	enum { cpuset, possible, fail } state = cpuset;
	int dest_cpu;
1267

1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284
	/*
	 * If the node that the cpu is on has been offlined, cpu_to_node()
	 * will return -1. There is no cpu on the node, and we should
	 * select the cpu on the other node.
	 */
	if (nid != -1) {
		nodemask = cpumask_of_node(nid);

		/* Look for allowed, online CPU in same node. */
		for_each_cpu(dest_cpu, nodemask) {
			if (!cpu_online(dest_cpu))
				continue;
			if (!cpu_active(dest_cpu))
				continue;
			if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
				return dest_cpu;
		}
1285
	}
1286

1287 1288
	for (;;) {
		/* Any allowed, online CPU? */
1289
		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
1290 1291 1292 1293 1294 1295
			if (!cpu_online(dest_cpu))
				continue;
			if (!cpu_active(dest_cpu))
				continue;
			goto out;
		}
1296

1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325
		switch (state) {
		case cpuset:
			/* No more Mr. Nice Guy. */
			cpuset_cpus_allowed_fallback(p);
			state = possible;
			break;

		case possible:
			do_set_cpus_allowed(p, cpu_possible_mask);
			state = fail;
			break;

		case fail:
			BUG();
			break;
		}
	}

out:
	if (state != cpuset) {
		/*
		 * Don't tell them about moving exiting tasks or
		 * kernel threads (both mm NULL), since they never
		 * leave kernel.
		 */
		if (p->mm && printk_ratelimit()) {
			printk_sched("process %d (%s) no longer affine to cpu%d\n",
					task_pid_nr(p), p->comm, cpu);
		}
1326 1327 1328 1329 1330
	}

	return dest_cpu;
}

1331
/*
1332
 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
1333
 */
1334
static inline
1335
int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
1336
{
1337
	cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348

	/*
	 * In order not to call set_task_cpu() on a blocking task we need
	 * to rely on ttwu() to place the task on a valid ->cpus_allowed
	 * cpu.
	 *
	 * Since this is common to all placement strategies, this lives here.
	 *
	 * [ this allows ->select_task() to simply return task_cpu(p) and
	 *   not worry about this generic constraint ]
	 */
1349
	if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
P
Peter Zijlstra 已提交
1350
		     !cpu_online(cpu)))
1351
		cpu = select_fallback_rq(task_cpu(p), p);
1352 1353

	return cpu;
1354
}

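/* Exponentially weighted moving average; new samples get a 1/8 weight. */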
static void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;
	*avg += diff >> 3;
}
1361 1362
#endif

P
Peter Zijlstra 已提交
1363
static void
1364
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
T
Tejun Heo 已提交
1365
{
P
Peter Zijlstra 已提交
1366
#ifdef CONFIG_SCHEDSTATS
1367 1368
	struct rq *rq = this_rq();

P
Peter Zijlstra 已提交
1369 1370 1371 1372 1373 1374 1375 1376 1377 1378
#ifdef CONFIG_SMP
	int this_cpu = smp_processor_id();

	if (cpu == this_cpu) {
		schedstat_inc(rq, ttwu_local);
		schedstat_inc(p, se.statistics.nr_wakeups_local);
	} else {
		struct sched_domain *sd;

		schedstat_inc(p, se.statistics.nr_wakeups_remote);
1379
		rcu_read_lock();
P
Peter Zijlstra 已提交
1380 1381 1382 1383 1384 1385
		for_each_domain(this_cpu, sd) {
			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
				schedstat_inc(sd, ttwu_wake_remote);
				break;
			}
		}
1386
		rcu_read_unlock();
P
Peter Zijlstra 已提交
1387
	}
1388 1389 1390 1391

	if (wake_flags & WF_MIGRATED)
		schedstat_inc(p, se.statistics.nr_wakeups_migrate);

P
Peter Zijlstra 已提交
1392 1393 1394
#endif /* CONFIG_SMP */

	schedstat_inc(rq, ttwu_count);
T
Tejun Heo 已提交
1395
	schedstat_inc(p, se.statistics.nr_wakeups);
P
Peter Zijlstra 已提交
1396 1397

	if (wake_flags & WF_SYNC)
T
Tejun Heo 已提交
1398
		schedstat_inc(p, se.statistics.nr_wakeups_sync);
P
Peter Zijlstra 已提交
1399 1400 1401 1402 1403 1404

#endif /* CONFIG_SCHEDSTATS */
}

static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
{
T
Tejun Heo 已提交
1405
	activate_task(rq, p, en_flags);
P
Peter Zijlstra 已提交
1406
	p->on_rq = 1;
1407 1408 1409 1410

	/* if a worker is waking up, notify workqueue */
	if (p->flags & PF_WQ_WORKER)
		wq_worker_waking_up(p, cpu_of(rq));
T
Tejun Heo 已提交
1411 1412
}

1413 1414 1415
/*
 * Mark the task runnable and perform wakeup-preemption.
 */
1416
static void
1417
ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
T
Tejun Heo 已提交
1418 1419
{
	check_preempt_curr(rq, p, wake_flags);
1420
	trace_sched_wakeup(p, true);
T
Tejun Heo 已提交
1421 1422 1423 1424 1425 1426

	p->state = TASK_RUNNING;
#ifdef CONFIG_SMP
	if (p->sched_class->task_woken)
		p->sched_class->task_woken(rq, p);

1427
	if (rq->idle_stamp) {
1428
		u64 delta = rq_clock(rq) - rq->idle_stamp;
1429
		u64 max = 2*rq->max_idle_balance_cost;
T
Tejun Heo 已提交
1430

1431 1432 1433
		update_avg(&rq->avg_idle, delta);

		if (rq->avg_idle > max)
T
Tejun Heo 已提交
1434
			rq->avg_idle = max;
1435

T
Tejun Heo 已提交
1436 1437 1438 1439 1440
		rq->idle_stamp = 0;
	}
#endif
}

1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465
static void
ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
{
#ifdef CONFIG_SMP
	if (p->sched_contributes_to_load)
		rq->nr_uninterruptible--;
#endif

	ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
	ttwu_do_wakeup(rq, p, wake_flags);
}

/*
 * Called in case the task @p isn't fully descheduled from its runqueue,
 * in this case we must do a remote wakeup. It's a 'light' wakeup though,
 * since all we need to do is flip p->state to TASK_RUNNING; the
 * task is still ->on_rq.
 */
static int ttwu_remote(struct task_struct *p, int wake_flags)
{
	struct rq *rq;
	int ret = 0;

	rq = __task_rq_lock(p);
	if (p->on_rq) {
1466 1467
		/* check_preempt_curr() may use rq clock */
		update_rq_clock(rq);
1468 1469 1470 1471 1472 1473 1474 1475
		ttwu_do_wakeup(rq, p, wake_flags);
		ret = 1;
	}
	__task_rq_unlock(rq);

	return ret;
}

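/*
 * Remote wakeups are queued on the target rq's lock-less wake_list and
 * drained from the scheduler IPI via sched_ttwu_pending(), which avoids
 * taking a remote rq->lock on the waking CPU.
 */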
#ifdef CONFIG_SMP
P
Peter Zijlstra 已提交
1477
static void sched_ttwu_pending(void)
1478 1479
{
	struct rq *rq = this_rq();
P
Peter Zijlstra 已提交
1480 1481
	struct llist_node *llist = llist_del_all(&rq->wake_list);
	struct task_struct *p;
1482 1483 1484

	raw_spin_lock(&rq->lock);

P
Peter Zijlstra 已提交
1485 1486 1487
	while (llist) {
		p = llist_entry(llist, struct task_struct, wake_entry);
		llist = llist_next(llist);
1488 1489 1490 1491 1492 1493 1494 1495
		ttwu_do_activate(rq, p, 0);
	}

	raw_spin_unlock(&rq->lock);
}

void scheduler_ipi(void)
{
1496 1497 1498 1499 1500
	/*
	 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
	 * TIF_NEED_RESCHED remotely (for the first time) will also send
	 * this IPI.
	 */
1501
	preempt_fold_need_resched();
1502

1503 1504 1505
	if (llist_empty(&this_rq()->wake_list)
			&& !tick_nohz_full_cpu(smp_processor_id())
			&& !got_nohz_idle_kick())
1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521
		return;

	/*
	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
	 * traditionally all their work was done from the interrupt return
	 * path. Now that we actually do some work, we need to make sure
	 * we do call them.
	 *
	 * Some archs already do call them, luckily irq_enter/exit nest
	 * properly.
	 *
	 * Arguably we should visit all archs and update all handlers,
	 * however a fair share of IPIs are still resched only so this would
	 * somewhat pessimize the simple resched case.
	 */
	irq_enter();
1522
	tick_nohz_full_check();
P
Peter Zijlstra 已提交
1523
	sched_ttwu_pending();
1524 1525 1526 1527

	/*
	 * Check if someone kicked us for doing the nohz idle load balance.
	 */
1528
	if (unlikely(got_nohz_idle_kick())) {
1529
		this_rq()->idle_balance = 1;
1530
		raise_softirq_irqoff(SCHED_SOFTIRQ);
1531
	}
1532
	irq_exit();
1533 1534 1535 1536
}

static void ttwu_queue_remote(struct task_struct *p, int cpu)
{
P
Peter Zijlstra 已提交
1537
	if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list))
1538 1539
		smp_send_reschedule(cpu);
}
1540

1541
bool cpus_share_cache(int this_cpu, int that_cpu)
1542 1543 1544
{
	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}
1545
#endif /* CONFIG_SMP */
1546

1547 1548 1549 1550
static void ttwu_queue(struct task_struct *p, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

1551
#if defined(CONFIG_SMP)
1552
	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
1553
		sched_clock_cpu(cpu); /* sync clocks x-cpu */
1554 1555 1556 1557 1558
		ttwu_queue_remote(p, cpu);
		return;
	}
#endif

1559 1560 1561
	raw_spin_lock(&rq->lock);
	ttwu_do_activate(rq, p, 0);
	raw_spin_unlock(&rq->lock);
T
Tejun Heo 已提交
1562 1563 1564
}

/**
L
Linus Torvalds 已提交
1565
 * try_to_wake_up - wake up a thread
T
Tejun Heo 已提交
1566
 * @p: the thread to be awakened
L
Linus Torvalds 已提交
1567
 * @state: the mask of task states that can be woken
T
Tejun Heo 已提交
1568
 * @wake_flags: wake modifier flags (WF_*)
L
Linus Torvalds 已提交
1569 1570 1571 1572 1573 1574 1575
 *
 * Put it on the run-queue if it's not already there. The "current"
 * thread is always on the run-queue (except when the actual
 * re-schedule is in progress), and as such you're allowed to do
 * the simpler "current->state = TASK_RUNNING" to mark yourself
 * runnable without the overhead of this.
 *
1576
 * Return: %true if @p was woken up, %false if it was already running.
T
Tejun Heo 已提交
1577
 * or @state didn't match @p's state.
L
Linus Torvalds 已提交
1578
 */
1579 1580
static int
try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
L
Linus Torvalds 已提交
1581 1582
{
	unsigned long flags;
1583
	int cpu, success = 0;
P
Peter Zijlstra 已提交
1584

1585 1586 1587 1588 1589 1590 1591
	/*
	 * If we are going to wake up a thread waiting for CONDITION we
	 * need to ensure that CONDITION=1 done by the caller can not be
	 * reordered with p->state check below. This pairs with mb() in
	 * set_current_state() the waiting thread does.
	 */
	smp_mb__before_spinlock();
1592
	raw_spin_lock_irqsave(&p->pi_lock, flags);
P
Peter Zijlstra 已提交
1593
	if (!(p->state & state))
L
Linus Torvalds 已提交
1594 1595
		goto out;

1596
	success = 1; /* we're going to change ->state */
L
Linus Torvalds 已提交
1597 1598
	cpu = task_cpu(p);

1599 1600
	if (p->on_rq && ttwu_remote(p, wake_flags))
		goto stat;
L
Linus Torvalds 已提交
1601 1602

#ifdef CONFIG_SMP
P
Peter Zijlstra 已提交
1603
	/*
1604 1605
	 * If the owning (remote) cpu is still in the middle of schedule() with
	 * this task as prev, wait until its done referencing the task.
P
Peter Zijlstra 已提交
1606
	 */
1607
	while (p->on_cpu)
1608
		cpu_relax();
1609
	/*
1610
	 * Pairs with the smp_wmb() in finish_lock_switch().
1611
	 */
1612
	smp_rmb();
L
Linus Torvalds 已提交
1613

1614
	p->sched_contributes_to_load = !!task_contributes_to_load(p);
P
Peter Zijlstra 已提交
1615
	p->state = TASK_WAKING;
1616

1617
	if (p->sched_class->task_waking)
1618
		p->sched_class->task_waking(p);
1619

1620
	cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
1621 1622
	if (task_cpu(p) != cpu) {
		wake_flags |= WF_MIGRATED;
1623
		set_task_cpu(p, cpu);
1624
	}
L
Linus Torvalds 已提交
1625 1626
#endif /* CONFIG_SMP */

1627 1628
	ttwu_queue(p, cpu);
stat:
1629
	ttwu_stat(p, cpu, wake_flags);
L
Linus Torvalds 已提交
1630
out:
1631
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
L
Linus Torvalds 已提交
1632 1633 1634 1635

	return success;
}

T
Tejun Heo 已提交
1636 1637 1638 1639
/**
 * try_to_wake_up_local - try to wake up a local task with rq lock held
 * @p: the thread to be awakened
 *
1640
 * Put @p on the run-queue if it's not already there. The caller must
T
Tejun Heo 已提交
1641
 * ensure that this_rq() is locked, @p is bound to this_rq() and not
1642
 * the current task.
T
Tejun Heo 已提交
1643 1644 1645 1646 1647
 */
static void try_to_wake_up_local(struct task_struct *p)
{
	struct rq *rq = task_rq(p);

1648 1649 1650 1651
	if (WARN_ON_ONCE(rq != this_rq()) ||
	    WARN_ON_ONCE(p == current))
		return;

T
Tejun Heo 已提交
1652 1653
	lockdep_assert_held(&rq->lock);

1654 1655 1656 1657 1658 1659
	if (!raw_spin_trylock(&p->pi_lock)) {
		raw_spin_unlock(&rq->lock);
		raw_spin_lock(&p->pi_lock);
		raw_spin_lock(&rq->lock);
	}

T
Tejun Heo 已提交
1660
	if (!(p->state & TASK_NORMAL))
1661
		goto out;
T
Tejun Heo 已提交
1662

P
Peter Zijlstra 已提交
1663
	if (!p->on_rq)
P
Peter Zijlstra 已提交
1664 1665
		ttwu_activate(rq, p, ENQUEUE_WAKEUP);

1666
	ttwu_do_wakeup(rq, p, 0);
1667
	ttwu_stat(p, smp_processor_id(), 0);
1668 1669
out:
	raw_spin_unlock(&p->pi_lock);
T
Tejun Heo 已提交
1670 1671
}

1672 1673 1674 1675 1676
/**
 * wake_up_process - Wake up a specific process
 * @p: The process to be woken up.
 *
 * Attempt to wake up the nominated process and move it to the set of runnable
1677 1678 1679
 * processes.
 *
 * Return: 1 if the process was woken up, 0 if it was already running.
1680 1681 1682 1683
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
1684
int wake_up_process(struct task_struct *p)
L
Linus Torvalds 已提交
1685
{
1686 1687
	WARN_ON(task_is_stopped_or_traced(p));
	return try_to_wake_up(p, TASK_NORMAL, 0);
L
Linus Torvalds 已提交
1688 1689 1690
}
EXPORT_SYMBOL(wake_up_process);

1691
int wake_up_state(struct task_struct *p, unsigned int state)
L
Linus Torvalds 已提交
1692 1693 1694 1695 1696 1697 1698
{
	return try_to_wake_up(p, state, 0);
}

/*
 * Perform scheduler related setup for a newly forked process p.
 * p is forked by current.
I
Ingo Molnar 已提交
1699 1700 1701
 *
 * __sched_fork() is basic setup used by init_idle() too:
 */
1702
static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
I
Ingo Molnar 已提交
1703
{
P
Peter Zijlstra 已提交
1704 1705 1706
	p->on_rq			= 0;

	p->se.on_rq			= 0;
I
Ingo Molnar 已提交
1707 1708
	p->se.exec_start		= 0;
	p->se.sum_exec_runtime		= 0;
1709
	p->se.prev_sum_exec_runtime	= 0;
1710
	p->se.nr_migrations		= 0;
P
Peter Zijlstra 已提交
1711
	p->se.vruntime			= 0;
P
Peter Zijlstra 已提交
1712
	INIT_LIST_HEAD(&p->se.group_node);
I
Ingo Molnar 已提交
1713 1714

#ifdef CONFIG_SCHEDSTATS
1715
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
I
Ingo Molnar 已提交
1716
#endif
N
Nick Piggin 已提交
1717

1718 1719 1720 1721
	RB_CLEAR_NODE(&p->dl.rb_node);
	hrtimer_init(&p->dl.dl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	p->dl.dl_runtime = p->dl.runtime = 0;
	p->dl.dl_deadline = p->dl.deadline = 0;
1722
	p->dl.dl_period = 0;
1723 1724
	p->dl.flags = 0;

P
Peter Zijlstra 已提交
1725
	INIT_LIST_HEAD(&p->rt.run_list);
N
Nick Piggin 已提交
1726

1727 1728 1729
#ifdef CONFIG_PREEMPT_NOTIFIERS
	INIT_HLIST_HEAD(&p->preempt_notifiers);
#endif
1730 1731 1732

#ifdef CONFIG_NUMA_BALANCING
	if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
1733
		p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
1734 1735 1736
		p->mm->numa_scan_seq = 0;
	}

1737 1738 1739 1740 1741
	if (clone_flags & CLONE_VM)
		p->numa_preferred_nid = current->numa_preferred_nid;
	else
		p->numa_preferred_nid = -1;

1742 1743
	p->node_stamp = 0ULL;
	p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
1744
	p->numa_scan_period = sysctl_numa_balancing_scan_delay;
1745
	p->numa_work.next = &p->numa_work;
1746 1747
	p->numa_faults_memory = NULL;
	p->numa_faults_buffer_memory = NULL;
1748 1749
	p->last_task_numa_placement = 0;
	p->last_sum_exec_runtime = 0;
1750 1751 1752

	INIT_LIST_HEAD(&p->numa_entry);
	p->numa_group = NULL;
1753
#endif /* CONFIG_NUMA_BALANCING */
I
Ingo Molnar 已提交
1754 1755
}

1756
#ifdef CONFIG_NUMA_BALANCING
1757
#ifdef CONFIG_SCHED_DEBUG
1758 1759 1760 1761 1762 1763 1764
void set_numabalancing_state(bool enabled)
{
	if (enabled)
		sched_feat_set("NUMA");
	else
		sched_feat_set("NO_NUMA");
}
1765 1766 1767 1768 1769 1770
#else
__read_mostly bool numabalancing_enabled;

void set_numabalancing_state(bool enabled)
{
	numabalancing_enabled = enabled;
I
Ingo Molnar 已提交
1771
}
1772
#endif /* CONFIG_SCHED_DEBUG */
1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795

#ifdef CONFIG_PROC_SYSCTL
int sysctl_numa_balancing(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int err;
	int state = numabalancing_enabled;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	t = *table;
	t.data = &state;
	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;
	if (write)
		set_numabalancing_state(state);
	return err;
}
#endif
#endif
I
Ingo Molnar 已提交
1796 1797 1798 1799

/*
 * fork()/clone()-time setup:
 */
1800
int sched_fork(unsigned long clone_flags, struct task_struct *p)
I
Ingo Molnar 已提交
1801
{
1802
	unsigned long flags;
I
Ingo Molnar 已提交
1803 1804
	int cpu = get_cpu();

1805
	__sched_fork(clone_flags, p);
1806
	/*
1807
	 * We mark the process as running here. This guarantees that
1808 1809 1810
	 * nobody will actually run it, and a signal or other external
	 * event cannot wake it up and insert it on the runqueue either.
	 */
1811
	p->state = TASK_RUNNING;
I
Ingo Molnar 已提交
1812

1813 1814 1815 1816 1817
	/*
	 * Make sure we do not leak PI boosting priority to the child.
	 */
	p->prio = current->normal_prio;

1818 1819 1820 1821
	/*
	 * Revert to default priority/policy on fork if requested.
	 */
	if (unlikely(p->sched_reset_on_fork)) {
1822
		if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
1823
			p->policy = SCHED_NORMAL;
1824
			p->static_prio = NICE_TO_PRIO(0);
1825 1826 1827 1828 1829 1830
			p->rt_priority = 0;
		} else if (PRIO_TO_NICE(p->static_prio) < 0)
			p->static_prio = NICE_TO_PRIO(0);

		p->prio = p->normal_prio = __normal_prio(p);
		set_load_weight(p);
1831

1832 1833 1834 1835 1836 1837
		/*
		 * We don't need the reset flag anymore after the fork. It has
		 * fulfilled its duty:
		 */
		p->sched_reset_on_fork = 0;
	}
1838

1839 1840 1841 1842 1843 1844
	if (dl_prio(p->prio)) {
		put_cpu();
		return -EAGAIN;
	} else if (rt_prio(p->prio)) {
		p->sched_class = &rt_sched_class;
	} else {
H
Hiroshi Shimamoto 已提交
1845
		p->sched_class = &fair_sched_class;
1846
	}
1847

P
Peter Zijlstra 已提交
1848 1849 1850
	if (p->sched_class->task_fork)
		p->sched_class->task_fork(p);

1851 1852 1853 1854 1855 1856 1857
	/*
	 * The child is not yet in the pid-hash so no cgroup attach races,
	 * and the cgroup is pinned to this child because cgroup_fork()
	 * is run before sched_fork().
	 *
	 * Silence PROVE_RCU.
	 */
1858
	raw_spin_lock_irqsave(&p->pi_lock, flags);
1859
	set_task_cpu(p, cpu);
1860
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1861

1862
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
I
Ingo Molnar 已提交
1863
	if (likely(sched_info_on()))
1864
		memset(&p->sched_info, 0, sizeof(p->sched_info));
L
Linus Torvalds 已提交
1865
#endif
P
Peter Zijlstra 已提交
1866 1867
#if defined(CONFIG_SMP)
	p->on_cpu = 0;
1868
#endif
1869
	init_task_preempt_count(p);
1870
#ifdef CONFIG_SMP
1871
	plist_node_init(&p->pushable_tasks, MAX_PRIO);
1872
	RB_CLEAR_NODE(&p->pushable_dl_tasks);
1873
#endif
1874

N
Nick Piggin 已提交
1875
	put_cpu();
1876
	return 0;
L
Linus Torvalds 已提交
1877 1878
}

unsigned long to_ratio(u64 period, u64 runtime)
{
	if (runtime == RUNTIME_INF)
		return 1ULL << 20;

	/*
	 * Doing this here saves a lot of checks in all
	 * the calling paths, and returning zero seems
	 * safe for them anyway.
	 */
	if (period == 0)
		return 0;

	return div64_u64(runtime << 20, period);
}
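
/*
 * Example (illustrative): to_ratio() expresses runtime/period in 20-bit
 * fixed point.  For runtime = 50ms and period = 100ms (in nanoseconds):
 *
 *	to_ratio(100000000, 50000000) = (50000000 << 20) / 100000000
 *				      = 524288 = 0.5 * (1 << 20)
 *
 * RUNTIME_INF maps to a full 1 << 20, and a zero period maps to 0.
 */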

#ifdef CONFIG_SMP
inline struct dl_bw *dl_bw_of(int i)
{
	return &cpu_rq(i)->rd->dl_bw;
}

static inline int dl_bw_cpus(int i)
{
	struct root_domain *rd = cpu_rq(i)->rd;
	int cpus = 0;

	for_each_cpu_and(i, rd->span, cpu_active_mask)
		cpus++;

	return cpus;
}
#else
inline struct dl_bw *dl_bw_of(int i)
{
	return &cpu_rq(i)->dl.dl_bw;
}

static inline int dl_bw_cpus(int i)
{
	return 1;
}
#endif

static inline
void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw -= tsk_bw;
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw += tsk_bw;
}

static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}

/*
 * We must be sure that accepting a new task (or allowing changing the
 * parameters of an existing one) is consistent with the bandwidth
 * constraints. If so, this function also updates the currently
 * allocated bandwidth to reflect the new situation.
 *
 * This function is called while holding p's rq->lock.
 */
static int dl_overflow(struct task_struct *p, int policy,
		       const struct sched_attr *attr)
{

	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
	u64 period = attr->sched_period ?: attr->sched_deadline;
	u64 runtime = attr->sched_runtime;
	u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
	int cpus, err = -1;

	if (new_bw == p->dl.dl_bw)
		return 0;

	/*
	 * Whether a task enters, leaves, or stays -deadline but changes
	 * its parameters, we may need to update the total allocated
	 * bandwidth of the container accordingly.
	 */
	raw_spin_lock(&dl_b->lock);
	cpus = dl_bw_cpus(task_cpu(p));
	if (dl_policy(policy) && !task_has_dl_policy(p) &&
	    !__dl_overflow(dl_b, cpus, 0, new_bw)) {
		__dl_add(dl_b, new_bw);
		err = 0;
	} else if (dl_policy(policy) && task_has_dl_policy(p) &&
		   !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
		__dl_clear(dl_b, p->dl.dl_bw);
		__dl_add(dl_b, new_bw);
		err = 0;
	} else if (!dl_policy(policy) && task_has_dl_policy(p)) {
		__dl_clear(dl_b, p->dl.dl_bw);
		err = 0;
	}
	raw_spin_unlock(&dl_b->lock);

	return err;
}
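
/*
 * Example (illustrative numbers): with dl_b->bw set to e.g. 95% in 20-bit
 * fixed point (roughly 996147) and 4 CPUs in the root domain,
 * __dl_overflow() admits new -deadline tasks as long as the summed
 * per-task dl_bw stays within about 4 * 996147.  A task with
 * runtime = 30ms and period = 100ms contributes
 * to_ratio(100ms, 30ms) ~= 314572 to that sum.
 */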

extern void init_dl_bw(struct dl_bw *dl_b);

/*
 * wake_up_new_task - wake up a newly created task for the first time.
 *
 * This function will do some initial scheduler statistics housekeeping
 * that must be done for every newly created context, then puts the task
 * on the runqueue and wakes it.
 */
void wake_up_new_task(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
#ifdef CONFIG_SMP
	/*
	 * Fork balancing, do it here and not earlier because:
	 *  - cpus_allowed can change in the fork path
	 *  - any previously selected cpu might disappear through hotplug
	 */
	set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
#endif

	/* Initialize new task's runnable average */
	init_task_runnable_average(p);
	rq = __task_rq_lock(p);
	activate_task(rq, p, 0);
	p->on_rq = 1;
	trace_sched_wakeup_new(p, true);
	check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
	if (p->sched_class->task_woken)
		p->sched_class->task_woken(rq, p);
#endif
	task_rq_unlock(rq, p, &flags);
}

#ifdef CONFIG_PREEMPT_NOTIFIERS

/**
 * preempt_notifier_register - tell me when current is being preempted & rescheduled
 * @notifier: notifier struct to register
 */
void preempt_notifier_register(struct preempt_notifier *notifier)
{
	hlist_add_head(&notifier->link, &current->preempt_notifiers);
}
EXPORT_SYMBOL_GPL(preempt_notifier_register);

/**
 * preempt_notifier_unregister - no longer interested in preemption notifications
 * @notifier: notifier struct to unregister
 *
 * This is safe to call from within a preemption notifier.
 */
void preempt_notifier_unregister(struct preempt_notifier *notifier)
{
	hlist_del(&notifier->link);
}
EXPORT_SYMBOL_GPL(preempt_notifier_unregister);

static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
	struct preempt_notifier *notifier;

	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
		notifier->ops->sched_in(notifier, raw_smp_processor_id());
}

static void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)
{
	struct preempt_notifier *notifier;

	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
		notifier->ops->sched_out(notifier, next);
}

#else /* !CONFIG_PREEMPT_NOTIFIERS */

static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
}

static void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)
{
}

#endif /* CONFIG_PREEMPT_NOTIFIERS */

/**
 * prepare_task_switch - prepare to switch tasks
 * @rq: the runqueue preparing to switch
 * @prev: the current task that is being switched out
 * @next: the task we are going to switch to.
 *
 * This is called with the rq lock held and interrupts off. It must
 * be paired with a subsequent finish_task_switch after the context
 * switch.
 *
 * prepare_task_switch sets up locking and calls architecture specific
 * hooks.
 */
static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
		    struct task_struct *next)
{
	trace_sched_switch(prev, next);
	sched_info_switch(rq, prev, next);
	perf_event_task_sched_out(prev, next);
	fire_sched_out_preempt_notifiers(prev, next);
	prepare_lock_switch(rq, next);
	prepare_arch_switch(next);
}

/**
 * finish_task_switch - clean up after a task-switch
 * @rq: runqueue associated with task-switch
 * @prev: the thread we just switched away from.
 *
 * finish_task_switch must be called after the context switch, paired
 * with a prepare_task_switch call before the context switch.
 * finish_task_switch will reconcile locking set up by prepare_task_switch,
 * and do any other architecture-specific cleanup actions.
 *
 * Note that we may have delayed dropping an mm in context_switch(). If
 * so, we finish that here outside of the runqueue lock. (Doing it
 * with the lock held can cause deadlocks; see schedule() for
 * details.)
 */
static void finish_task_switch(struct rq *rq, struct task_struct *prev)
	__releases(rq->lock)
{
	struct mm_struct *mm = rq->prev_mm;
	long prev_state;

	rq->prev_mm = NULL;

	/*
	 * A task struct has one reference for the use as "current".
	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
	 * schedule one last time. The schedule call will never return, and
	 * the scheduled task must drop that reference.
	 * The test for TASK_DEAD must occur while the runqueue locks are
	 * still held, otherwise prev could be scheduled on another cpu, die
	 * there before we look at prev->state, and then the reference would
	 * be dropped twice.
	 *		Manfred Spraul <manfred@colorfullife.com>
	 */
	prev_state = prev->state;
	vtime_task_switch(prev);
	finish_arch_switch(prev);
	perf_event_task_sched_in(prev, current);
	finish_lock_switch(rq, prev);
	finish_arch_post_lock_switch();

	fire_sched_in_preempt_notifiers(current);
	if (mm)
		mmdrop(mm);
	if (unlikely(prev_state == TASK_DEAD)) {
		if (prev->sched_class->task_dead)
			prev->sched_class->task_dead(prev);

		/*
		 * Remove function-return probe instances associated with this
		 * task and put them back on the free list.
		 */
		kprobe_flush_task(prev);
		put_task_struct(prev);
	}

	tick_nohz_task_switch(current);
}

#ifdef CONFIG_SMP

/* rq->lock is NOT held, but preemption is disabled */
static inline void post_schedule(struct rq *rq)
{
	if (rq->post_schedule) {
		unsigned long flags;

		raw_spin_lock_irqsave(&rq->lock, flags);
		if (rq->curr->sched_class->post_schedule)
			rq->curr->sched_class->post_schedule(rq);
		raw_spin_unlock_irqrestore(&rq->lock, flags);

		rq->post_schedule = 0;
	}
}

#else

static inline void post_schedule(struct rq *rq)
{
}

#endif

/**
 * schedule_tail - first thing a freshly forked thread must call.
 * @prev: the thread we just switched away from.
 */
asmlinkage void schedule_tail(struct task_struct *prev)
	__releases(rq->lock)
{
	struct rq *rq = this_rq();

	finish_task_switch(rq, prev);

	/*
	 * FIXME: do we need to worry about rq being invalidated by the
	 * task_switch?
	 */
	post_schedule(rq);

#ifdef __ARCH_WANT_UNLOCKED_CTXSW
	/* In this case, finish_task_switch does not reenable preemption */
	preempt_enable();
#endif
	if (current->set_child_tid)
		put_user(task_pid_vnr(current), current->set_child_tid);
}

/*
 * context_switch - switch to the new MM and the new
 * thread's register state.
 */
static inline void
context_switch(struct rq *rq, struct task_struct *prev,
	       struct task_struct *next)
{
	struct mm_struct *mm, *oldmm;

	prepare_task_switch(rq, prev, next);

	mm = next->mm;
	oldmm = prev->active_mm;
	/*
	 * For paravirt, this is coupled with an exit in switch_to to
	 * combine the page table reload and the switch backend into
	 * one hypercall.
	 */
	arch_start_context_switch(prev);

	if (!mm) {
		next->active_mm = oldmm;
		atomic_inc(&oldmm->mm_count);
		enter_lazy_tlb(oldmm, next);
	} else
		switch_mm(oldmm, mm, next);

	if (!prev->mm) {
		prev->active_mm = NULL;
		rq->prev_mm = oldmm;
	}
	/*
	 * The runqueue lock will be released by the next
	 * task (which is an invalid locking op but in the case
	 * of the scheduler it's an obvious special-case), so we
	 * do an early lockdep release here:
	 */
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
#endif

	context_tracking_task_switch(prev, next);
	/* Here we just switch the register state and the stack. */
	switch_to(prev, next, prev);

	barrier();
	/*
	 * this_rq must be evaluated again because prev may have moved
	 * CPUs since it called schedule(), thus the 'rq' on its stack
	 * frame will be invalid.
	 */
	finish_task_switch(this_rq(), prev);
}

/*
 * nr_running and nr_context_switches:
 *
 * externally visible scheduler statistics: current number of runnable
 * threads, total number of context switches performed since bootup.
 */
unsigned long nr_running(void)
{
	unsigned long i, sum = 0;

	for_each_online_cpu(i)
		sum += cpu_rq(i)->nr_running;

	return sum;
}

unsigned long long nr_context_switches(void)
{
	int i;
	unsigned long long sum = 0;

	for_each_possible_cpu(i)
		sum += cpu_rq(i)->nr_switches;

	return sum;
}

unsigned long nr_iowait(void)
{
	unsigned long i, sum = 0;

	for_each_possible_cpu(i)
		sum += atomic_read(&cpu_rq(i)->nr_iowait);

	return sum;
}

unsigned long nr_iowait_cpu(int cpu)
{
	struct rq *this = cpu_rq(cpu);
	return atomic_read(&this->nr_iowait);
}

#ifdef CONFIG_SMP

/*
 * sched_exec - execve() is a valuable balancing opportunity, because at
 * this point the task has the smallest effective memory and cache footprint.
 */
void sched_exec(void)
{
	struct task_struct *p = current;
	unsigned long flags;
	int dest_cpu;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
	if (dest_cpu == smp_processor_id())
		goto unlock;

	if (likely(cpu_active(dest_cpu))) {
		struct migration_arg arg = { p, dest_cpu };

		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
		return;
	}
unlock:
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

#endif

DEFINE_PER_CPU(struct kernel_stat, kstat);
DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);

EXPORT_PER_CPU_SYMBOL(kstat);
EXPORT_PER_CPU_SYMBOL(kernel_cpustat);

/*
 * Return any ns on the sched_clock that have not yet been accounted in
 * @p in case that task is currently running.
 *
 * Called with task_rq_lock() held on @rq.
 */
static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
{
	u64 ns = 0;

	if (task_current(rq, p)) {
		update_rq_clock(rq);
		ns = rq_clock_task(rq) - p->se.exec_start;
		if ((s64)ns < 0)
			ns = 0;
	}

	return ns;
}

unsigned long long task_delta_exec(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;
	u64 ns = 0;

	rq = task_rq_lock(p, &flags);
	ns = do_task_delta_exec(p, rq);
	task_rq_unlock(rq, p, &flags);

	return ns;
}

/*
 * Return accounted runtime for the task.
 * In case the task is currently running, return the runtime plus current's
 * pending runtime that has not been accounted yet.
 */
unsigned long long task_sched_runtime(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;
	u64 ns = 0;

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
	/*
	 * 64-bit doesn't need locks to atomically read a 64-bit value.
	 * So we have an optimization chance when the task's delta_exec is 0.
	 * Reading ->on_cpu is racy, but this is ok.
	 *
	 * If we race with it leaving cpu, we'll take a lock. So we're correct.
	 * If we race with it entering cpu, unaccounted time is 0. This is
	 * indistinguishable from the read occurring a few cycles earlier.
	 */
	if (!p->on_cpu)
		return p->se.sum_exec_runtime;
#endif

	rq = task_rq_lock(p, &flags);
	ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
	task_rq_unlock(rq, p, &flags);

	return ns;
}

/*
 * This function gets called by the timer code, with HZ frequency.
 * We call it with interrupts disabled.
 */
void scheduler_tick(void)
{
	int cpu = smp_processor_id();
	struct rq *rq = cpu_rq(cpu);
	struct task_struct *curr = rq->curr;

	sched_clock_tick();

	raw_spin_lock(&rq->lock);
	update_rq_clock(rq);
	curr->sched_class->task_tick(rq, curr, 0);
	update_cpu_load_active(rq);
	raw_spin_unlock(&rq->lock);

	perf_event_task_tick();

#ifdef CONFIG_SMP
	rq->idle_balance = idle_cpu(cpu);
	trigger_load_balance(rq);
#endif
	rq_last_tick_reset(rq);
}

#ifdef CONFIG_NO_HZ_FULL
/**
 * scheduler_tick_max_deferment
 *
 * Keep at least one tick per second when a single
 * active task is running because the scheduler doesn't
 * yet completely support full dynticks environment.
 *
 * This makes sure that uptime, CFS vruntime, load
 * balancing, etc... continue to move forward, even
 * with a very low granularity.
 *
 * Return: Maximum deferment in nanoseconds.
 */
u64 scheduler_tick_max_deferment(void)
{
	struct rq *rq = this_rq();
	unsigned long next, now = ACCESS_ONCE(jiffies);

	next = rq->last_sched_tick + HZ;

	if (time_before_eq(next, now))
		return 0;

	return jiffies_to_nsecs(next - now);
}
#endif

notrace unsigned long get_parent_ip(unsigned long addr)
{
	if (in_lock_functions(addr)) {
		addr = CALLER_ADDR2;
		if (in_lock_functions(addr))
			addr = CALLER_ADDR3;
	}
	return addr;
}

#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
				defined(CONFIG_PREEMPT_TRACER))

void __kprobes preempt_count_add(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
	/*
	 * Underflow?
	 */
	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
		return;
#endif
	__preempt_count_add(val);
#ifdef CONFIG_DEBUG_PREEMPT
	/*
	 * Spinlock count overflowing soon?
	 */
	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
				PREEMPT_MASK - 10);
#endif
	if (preempt_count() == val) {
		unsigned long ip = get_parent_ip(CALLER_ADDR1);
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = ip;
#endif
		trace_preempt_off(CALLER_ADDR0, ip);
	}
}
EXPORT_SYMBOL(preempt_count_add);

void __kprobes preempt_count_sub(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
	/*
	 * Underflow?
	 */
	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
		return;
	/*
	 * Is the spinlock portion underflowing?
	 */
	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
			!(preempt_count() & PREEMPT_MASK)))
		return;
#endif

	if (preempt_count() == val)
		trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
	__preempt_count_sub(val);
}
EXPORT_SYMBOL(preempt_count_sub);

#endif

/*
 * Print scheduling while atomic bug:
 */
static noinline void __schedule_bug(struct task_struct *prev)
{
	if (oops_in_progress)
		return;

	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
		prev->comm, prev->pid, preempt_count());

	debug_show_held_locks(prev);
	print_modules();
	if (irqs_disabled())
		print_irqtrace_events(prev);
#ifdef CONFIG_DEBUG_PREEMPT
	if (in_atomic_preempt_off()) {
		pr_err("Preemption disabled at:");
		print_ip_sym(current->preempt_disable_ip);
		pr_cont("\n");
	}
#endif
	dump_stack();
	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}

/*
 * Various schedule()-time debugging checks and statistics:
 */
static inline void schedule_debug(struct task_struct *prev)
{
	/*
	 * Test if we are atomic. Since do_exit() needs to call into
	 * schedule() atomically, we ignore that path. Otherwise whine
	 * if we are scheduling when we should not.
	 */
	if (unlikely(in_atomic_preempt_off() && prev->state != TASK_DEAD))
		__schedule_bug(prev);
	rcu_sleep_check();

	profile_hit(SCHED_PROFILING, __builtin_return_address(0));

	schedstat_inc(this_rq(), sched_count);
}

/*
 * Pick up the highest-prio task:
 */
static inline struct task_struct *
pick_next_task(struct rq *rq, struct task_struct *prev)
{
	const struct sched_class *class = &fair_sched_class;
	struct task_struct *p;

	/*
	 * Optimization: we know that if all tasks are in
	 * the fair class we can call that function directly:
	 */
	if (likely(prev->sched_class == class &&
		   rq->nr_running == rq->cfs.h_nr_running)) {
		p = fair_sched_class.pick_next_task(rq, prev);
		if (unlikely(p == RETRY_TASK))
			goto again;

		/* assumes fair_sched_class->next == idle_sched_class */
		if (unlikely(!p))
			p = idle_sched_class.pick_next_task(rq, prev);

		return p;
	}

again:
	for_each_class(class) {
		p = class->pick_next_task(rq, prev);
		if (p) {
			if (unlikely(p == RETRY_TASK))
				goto again;
			return p;
		}
	}

	BUG(); /* the idle class will always have a runnable task */
}

/*
 * __schedule() is the main scheduler function.
 *
 * The main means of driving the scheduler and thus entering this function are:
 *
 *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
 *
 *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
 *      paths. For example, see arch/x86/entry_64.S.
 *
 *      To drive preemption between tasks, the scheduler sets the flag in timer
 *      interrupt handler scheduler_tick().
 *
 *   3. Wakeups don't really cause entry into schedule(). They add a
 *      task to the run-queue and that's it.
 *
 *      Now, if the new task added to the run-queue preempts the current
 *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
 *      called on the nearest possible occasion:
 *
 *       - If the kernel is preemptible (CONFIG_PREEMPT=y):
 *
 *         - in syscall or exception context, at the next outmost
 *           preempt_enable(). (this might be as soon as the wake_up()'s
 *           spin_unlock()!)
 *
 *         - in IRQ context, return from interrupt-handler to
 *           preemptible context
 *
 *       - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
 *         then at the next:
 *
 *          - cond_resched() call
 *          - explicit schedule() call
 *          - return from syscall or exception to user-space
 *          - return from interrupt-handler to user-space
 */
static void __sched __schedule(void)
{
	struct task_struct *prev, *next;
	unsigned long *switch_count;
	struct rq *rq;
	int cpu;

need_resched:
	preempt_disable();
	cpu = smp_processor_id();
	rq = cpu_rq(cpu);
	rcu_note_context_switch(cpu);
	prev = rq->curr;

	schedule_debug(prev);

	if (sched_feat(HRTICK))
		hrtick_clear(rq);

	/*
	 * Make sure that signal_pending_state()->signal_pending() below
	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
	 * done by the caller to avoid the race with signal_wake_up().
	 */
	smp_mb__before_spinlock();
	raw_spin_lock_irq(&rq->lock);

	switch_count = &prev->nivcsw;
	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
		if (unlikely(signal_pending_state(prev->state, prev))) {
			prev->state = TASK_RUNNING;
		} else {
			deactivate_task(rq, prev, DEQUEUE_SLEEP);
			prev->on_rq = 0;

			/*
			 * If a worker went to sleep, notify and ask workqueue
			 * whether it wants to wake up a task to maintain
			 * concurrency.
			 */
			if (prev->flags & PF_WQ_WORKER) {
				struct task_struct *to_wakeup;

				to_wakeup = wq_worker_sleeping(prev, cpu);
				if (to_wakeup)
					try_to_wake_up_local(to_wakeup);
			}
		}
		switch_count = &prev->nvcsw;
	}

	if (prev->on_rq || rq->skip_clock_update < 0)
		update_rq_clock(rq);

	next = pick_next_task(rq, prev);
	clear_tsk_need_resched(prev);
	clear_preempt_need_resched();
	rq->skip_clock_update = 0;

	if (likely(prev != next)) {
		rq->nr_switches++;
		rq->curr = next;
		++*switch_count;

		context_switch(rq, prev, next); /* unlocks the rq */
		/*
		 * The context switch has flipped the stack from under us
		 * and restored the local variables which were saved when
		 * this task called schedule() in the past. prev == current
		 * is still correct, but it can be moved to another cpu/rq.
		 */
		cpu = smp_processor_id();
		rq = cpu_rq(cpu);
	} else
		raw_spin_unlock_irq(&rq->lock);

	post_schedule(rq);

	sched_preempt_enable_no_resched();
	if (need_resched())
		goto need_resched;
}

static inline void sched_submit_work(struct task_struct *tsk)
{
	if (!tsk->state || tsk_is_pi_blocked(tsk))
		return;
	/*
	 * If we are going to sleep and we have plugged IO queued,
	 * make sure to submit it to avoid deadlocks.
	 */
	if (blk_needs_flush_plug(tsk))
		blk_schedule_flush_plug(tsk);
}

asmlinkage void __sched schedule(void)
{
	struct task_struct *tsk = current;

	sched_submit_work(tsk);
	__schedule();
}
EXPORT_SYMBOL(schedule);

#ifdef CONFIG_CONTEXT_TRACKING
asmlinkage void __sched schedule_user(void)
{
	/*
	 * If we come here after a random call to set_need_resched(),
	 * or we have been woken up remotely but the IPI has not yet arrived,
	 * we haven't yet exited the RCU idle mode. Do it here manually until
	 * we find a better solution.
	 */
	user_exit();
	schedule();
	user_enter();
}
#endif

/**
 * schedule_preempt_disabled - called with preemption disabled
 *
 * Returns with preemption disabled. Note: preempt_count must be 1
 */
void __sched schedule_preempt_disabled(void)
{
	sched_preempt_enable_no_resched();
	schedule();
	preempt_disable();
}

#ifdef CONFIG_PREEMPT
/*
 * this is the entry point to schedule() from in-kernel preemption
 * off of preempt_enable. Kernel preemptions off return from interrupt
 * occur there and call schedule directly.
 */
asmlinkage void __sched notrace preempt_schedule(void)
{
	/*
	 * If there is a non-zero preempt_count or interrupts are disabled,
	 * we do not want to preempt the current task. Just return..
	 */
	if (likely(!preemptible()))
		return;

	do {
		__preempt_count_add(PREEMPT_ACTIVE);
		__schedule();
		__preempt_count_sub(PREEMPT_ACTIVE);

		/*
		 * Check again in case we missed a preemption opportunity
		 * between schedule and now.
		 */
		barrier();
	} while (need_resched());
}
EXPORT_SYMBOL(preempt_schedule);
2814
#endif /* CONFIG_PREEMPT */

/*
 * this is the entry point to schedule() from kernel preemption
 * off of irq context.
 * Note, that this is called and returns with irqs disabled. This will
 * protect us against recursive calling from irq.
 */
asmlinkage void __sched preempt_schedule_irq(void)
{
	enum ctx_state prev_state;

	/* Catch callers which need to be fixed */
	BUG_ON(preempt_count() || !irqs_disabled());

	prev_state = exception_enter();

	do {
		__preempt_count_add(PREEMPT_ACTIVE);
		local_irq_enable();
		__schedule();
		local_irq_disable();
		__preempt_count_sub(PREEMPT_ACTIVE);

		/*
		 * Check again in case we missed a preemption opportunity
		 * between schedule and now.
		 */
		barrier();
	} while (need_resched());

	exception_exit(prev_state);
}

int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
			  void *key)
{
	return try_to_wake_up(curr->private, mode, wake_flags);
}
EXPORT_SYMBOL(default_wake_function);

#ifdef CONFIG_RT_MUTEXES

/*
 * rt_mutex_setprio - set the current priority of a task
 * @p: task
 * @prio: prio value (kernel-internal form)
 *
 * This function changes the 'effective' priority of a task. It does
 * not touch ->normal_prio like __setscheduler().
 *
 * Used by the rt_mutex code to implement priority inheritance
 * logic. Call site only calls if the priority of the task changed.
 */
void rt_mutex_setprio(struct task_struct *p, int prio)
{
	int oldprio, on_rq, running, enqueue_flag = 0;
	struct rq *rq;
	const struct sched_class *prev_class;

	BUG_ON(prio > MAX_PRIO);

	rq = __task_rq_lock(p);

	/*
	 * Idle task boosting is a nono in general. There is one
	 * exception, when PREEMPT_RT and NOHZ is active:
	 *
	 * The idle task calls get_next_timer_interrupt() and holds
	 * the timer wheel base->lock on the CPU and another CPU wants
	 * to access the timer (probably to cancel it). We can safely
	 * ignore the boosting request, as the idle CPU runs this code
	 * with interrupts disabled and will complete the lock
	 * protected section without being interrupted. So there is no
	 * real need to boost.
	 */
	if (unlikely(p == rq->idle)) {
		WARN_ON(p != rq->curr);
		WARN_ON(p->pi_blocked_on);
		goto out_unlock;
	}

	trace_sched_pi_setprio(p, prio);
	p->pi_top_task = rt_mutex_get_top_task(p);
	oldprio = p->prio;
	prev_class = p->sched_class;
	on_rq = p->on_rq;
	running = task_current(rq, p);
	if (on_rq)
		dequeue_task(rq, p, 0);
	if (running)
		p->sched_class->put_prev_task(rq, p);

	/*
	 * Boosting conditions are:
	 * 1. -rt task is running and holds mutex A
	 *      --> -dl task blocks on mutex A
	 *
	 * 2. -dl task is running and holds mutex A
	 *      --> -dl task blocks on mutex A and could preempt the
	 *          running task
	 */
	if (dl_prio(prio)) {
		if (!dl_prio(p->normal_prio) || (p->pi_top_task &&
			dl_entity_preempt(&p->pi_top_task->dl, &p->dl))) {
			p->dl.dl_boosted = 1;
			p->dl.dl_throttled = 0;
			enqueue_flag = ENQUEUE_REPLENISH;
		} else
			p->dl.dl_boosted = 0;
		p->sched_class = &dl_sched_class;
	} else if (rt_prio(prio)) {
		if (dl_prio(oldprio))
			p->dl.dl_boosted = 0;
		if (oldprio < prio)
			enqueue_flag = ENQUEUE_HEAD;
		p->sched_class = &rt_sched_class;
	} else {
		if (dl_prio(oldprio))
			p->dl.dl_boosted = 0;
		p->sched_class = &fair_sched_class;
	}

	p->prio = prio;

	if (running)
		p->sched_class->set_curr_task(rq);
	if (on_rq)
		enqueue_task(rq, p, enqueue_flag);

	check_class_changed(rq, p, prev_class, oldprio);
out_unlock:
	__task_rq_unlock(rq);
}
#endif

void set_user_nice(struct task_struct *p, long nice)
{
	int old_prio, delta, on_rq;
	unsigned long flags;
	struct rq *rq;

	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
		return;
	/*
	 * We have to be careful, if called from sys_setpriority(),
	 * the task might be in the middle of scheduling on another CPU.
	 */
	rq = task_rq_lock(p, &flags);
	/*
	 * The RT priorities are set via sched_setscheduler(), but we still
	 * allow the 'normal' nice value to be set - but as expected
	 * it won't have any effect on scheduling until the task is
	 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
	 */
	if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
		p->static_prio = NICE_TO_PRIO(nice);
		goto out_unlock;
	}
	on_rq = p->on_rq;
	if (on_rq)
		dequeue_task(rq, p, 0);

	p->static_prio = NICE_TO_PRIO(nice);
	set_load_weight(p);
	old_prio = p->prio;
	p->prio = effective_prio(p);
	delta = p->prio - old_prio;

	if (on_rq) {
		enqueue_task(rq, p, 0);
		/*
		 * If the task increased its priority or is running and
		 * lowered its priority, then reschedule its CPU:
		 */
		if (delta < 0 || (delta > 0 && task_running(rq, p)))
			resched_task(rq->curr);
	}
out_unlock:
	task_rq_unlock(rq, p, &flags);
}
EXPORT_SYMBOL(set_user_nice);

/*
 * can_nice - check if a task can reduce its nice value
 * @p: task
 * @nice: nice value
 */
int can_nice(const struct task_struct *p, const int nice)
{
	/* convert nice value [19,-20] to rlimit style value [1,40] */
	int nice_rlim = 20 - nice;

	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
		capable(CAP_SYS_NICE));
}
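
/*
 * Example (illustrative): for nice = -5 the rlimit-style value is
 * 20 - (-5) = 25, so an unprivileged task needs RLIMIT_NICE >= 25
 * (or CAP_SYS_NICE) to use that nice value.
 */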

#ifdef __ARCH_WANT_SYS_NICE

/*
 * sys_nice - change the priority of the current process.
 * @increment: priority increment
 *
 * sys_setpriority is a more generic, but much slower function that
 * does similar things.
 */
SYSCALL_DEFINE1(nice, int, increment)
{
	long nice, retval;

	/*
	 * Setpriority might change our priority at the same moment.
	 * We don't have to worry. Conceptually one call occurs first
	 * and we have a single winner.
	 */
	if (increment < -40)
		increment = -40;
	if (increment > 40)
		increment = 40;

	nice = task_nice(current) + increment;
	if (nice < MIN_NICE)
		nice = MIN_NICE;
	if (nice > MAX_NICE)
		nice = MAX_NICE;

	if (increment < 0 && !can_nice(current, nice))
		return -EPERM;

	retval = security_task_setnice(current, nice);
	if (retval)
		return retval;

	set_user_nice(current, nice);
	return 0;
}
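
/*
 * Example (illustrative): with a current nice of 0 and increment = 50,
 * the increment is first clamped to 40 and the resulting nice value is
 * then clamped to MAX_NICE (19), so the task ends up at the weakest
 * priority rather than an out-of-range one.
 */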

#endif

/**
 * task_prio - return the priority value of a given task.
 * @p: the task in question.
 *
 * Return: The priority value as seen by users in /proc.
 * RT tasks are offset by -200. Normal tasks are centered
 * around 0, value goes from -16 to +15.
 */
int task_prio(const struct task_struct *p)
{
	return p->prio - MAX_RT_PRIO;
}

/**
 * idle_cpu - is a given cpu idle currently?
 * @cpu: the processor in question.
 *
 * Return: 1 if the CPU is currently idle. 0 otherwise.
 */
int idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (rq->curr != rq->idle)
		return 0;

	if (rq->nr_running)
		return 0;

#ifdef CONFIG_SMP
	if (!llist_empty(&rq->wake_list))
		return 0;
#endif

	return 1;
}

/**
 * idle_task - return the idle task for a given cpu.
 * @cpu: the processor in question.
 *
 * Return: The idle task for the cpu @cpu.
 */
struct task_struct *idle_task(int cpu)
{
	return cpu_rq(cpu)->idle;
}

/**
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
 *
 * The task of @pid, if found. %NULL otherwise.
 */
static struct task_struct *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_vpid(pid) : current;
}

/*
 * This function initializes the sched_dl_entity of a task that is
 * becoming a SCHED_DEADLINE task.
 *
 * Only the static values are considered here, the actual runtime and the
 * absolute deadline will be properly calculated when the task is enqueued
 * for the first time with its new policy.
 */
static void
__setparam_dl(struct task_struct *p, const struct sched_attr *attr)
{
	struct sched_dl_entity *dl_se = &p->dl;

	init_dl_task_timer(dl_se);
	dl_se->dl_runtime = attr->sched_runtime;
	dl_se->dl_deadline = attr->sched_deadline;
	dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
	dl_se->flags = attr->sched_flags;
	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
	dl_se->dl_throttled = 0;
	dl_se->dl_new = 1;
	dl_se->dl_yielded = 0;
}

static void __setscheduler_params(struct task_struct *p,
		const struct sched_attr *attr)
{
	int policy = attr->sched_policy;

	if (policy == -1) /* setparam */
		policy = p->policy;

	p->policy = policy;

	if (dl_policy(policy))
		__setparam_dl(p, attr);
	else if (fair_policy(policy))
		p->static_prio = NICE_TO_PRIO(attr->sched_nice);

	/*
	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
	 * !rt_policy. Always setting this ensures that things like
	 * getparam()/getattr() don't report silly values for !rt tasks.
	 */
	p->rt_priority = attr->sched_priority;
	p->normal_prio = normal_prio(p);
	set_load_weight(p);
}

/* Actually do priority change: must hold pi & rq lock. */
static void __setscheduler(struct rq *rq, struct task_struct *p,
			   const struct sched_attr *attr)
{
	__setscheduler_params(p, attr);

	/*
	 * If we get here, there were no pi waiters boosting the
	 * task. It is safe to use the normal prio.
	 */
	p->prio = normal_prio(p);

	if (dl_prio(p->prio))
		p->sched_class = &dl_sched_class;
	else if (rt_prio(p->prio))
		p->sched_class = &rt_sched_class;
	else
		p->sched_class = &fair_sched_class;
}

static void
__getparam_dl(struct task_struct *p, struct sched_attr *attr)
{
	struct sched_dl_entity *dl_se = &p->dl;

	attr->sched_priority = p->rt_priority;
	attr->sched_runtime = dl_se->dl_runtime;
	attr->sched_deadline = dl_se->dl_deadline;
	attr->sched_period = dl_se->dl_period;
	attr->sched_flags = dl_se->flags;
}

/*
 * This function validates the new parameters of a -deadline task.
 * We require the deadline to be non-zero and greater than or equal
 * to the runtime, and the period to be either zero or greater than
 * or equal to the deadline. Furthermore, we have to be sure that
 * user parameters are above the internal resolution of 1us (we
 * check sched_runtime only since it is always the smaller one) and
 * below 2^63 ns (we have to check both sched_deadline and
 * sched_period, as the latter can be zero).
 */
static bool
__checkparam_dl(const struct sched_attr *attr)
{
	/* deadline != 0 */
	if (attr->sched_deadline == 0)
		return false;

	/*
	 * Since we truncate DL_SCALE bits, make sure we're at least
	 * that big.
	 */
	if (attr->sched_runtime < (1ULL << DL_SCALE))
		return false;

	/*
	 * Since we use the MSB for wrap-around and sign issues, make
	 * sure it's not set (mind that period can be equal to zero).
	 */
	if (attr->sched_deadline & (1ULL << 63) ||
	    attr->sched_period & (1ULL << 63))
		return false;

	/* runtime <= deadline <= period (if period != 0) */
	if ((attr->sched_period != 0 &&
	     attr->sched_period < attr->sched_deadline) ||
	    attr->sched_deadline < attr->sched_runtime)
		return false;

	return true;
}
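
/*
 * Example (illustrative): sched_runtime = 10ms, sched_deadline = 50ms and
 * sched_period = 100ms pass the checks above (runtime <= deadline <= period
 * and the runtime is well above the ~1us resolution).  A zero sched_deadline,
 * or a runtime larger than the deadline, is rejected.
 */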

/*
 * check the target process has a UID that matches the current process's
 */
static bool check_same_owner(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred;
	bool match;

	rcu_read_lock();
	pcred = __task_cred(p);
	match = (uid_eq(cred->euid, pcred->euid) ||
		 uid_eq(cred->euid, pcred->uid));
	rcu_read_unlock();
	return match;
}

static int __sched_setscheduler(struct task_struct *p,
				const struct sched_attr *attr,
				bool user)
{
3254 3255
	int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
		      MAX_RT_PRIO - 1 - attr->sched_priority;
3256
	int retval, oldprio, oldpolicy = -1, on_rq, running;
3257
	int policy = attr->sched_policy;
L
Linus Torvalds 已提交
3258
	unsigned long flags;
3259
	const struct sched_class *prev_class;
3260
	struct rq *rq;
3261
	int reset_on_fork;
L
Linus Torvalds 已提交
3262

3263 3264
	/* may grab non-irq protected spin_locks */
	BUG_ON(in_interrupt());
L
Linus Torvalds 已提交
3265 3266
recheck:
	/* double check policy once rq lock held */
3267 3268
	if (policy < 0) {
		reset_on_fork = p->sched_reset_on_fork;
L
Linus Torvalds 已提交
3269
		policy = oldpolicy = p->policy;
3270
	} else {
3271
		reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
3272

3273 3274
		if (policy != SCHED_DEADLINE &&
				policy != SCHED_FIFO && policy != SCHED_RR &&
3275 3276 3277 3278 3279
				policy != SCHED_NORMAL && policy != SCHED_BATCH &&
				policy != SCHED_IDLE)
			return -EINVAL;
	}

3280 3281 3282
	if (attr->sched_flags & ~(SCHED_FLAG_RESET_ON_FORK))
		return -EINVAL;

L
Linus Torvalds 已提交
3283 3284
	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are
I
Ingo Molnar 已提交
3285 3286
	 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
	 * SCHED_BATCH and SCHED_IDLE is 0.
L
Linus Torvalds 已提交
3287
	 */
3288
	if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) ||
3289
	    (!p->mm && attr->sched_priority > MAX_RT_PRIO-1))
L
Linus Torvalds 已提交
3290
		return -EINVAL;
3291 3292
	if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
	    (rt_policy(policy) != (attr->sched_priority != 0)))
L
Linus Torvalds 已提交
3293 3294
		return -EINVAL;

3295 3296 3297
	/*
	 * Allow unprivileged RT tasks to decrease priority:
	 */
3298
	if (user && !capable(CAP_SYS_NICE)) {
3299
		if (fair_policy(policy)) {
3300
			if (attr->sched_nice < task_nice(p) &&
3301
			    !can_nice(p, attr->sched_nice))
3302 3303 3304
				return -EPERM;
		}

3305
		if (rt_policy(policy)) {
3306 3307
			unsigned long rlim_rtprio =
					task_rlimit(p, RLIMIT_RTPRIO);
3308 3309 3310 3311 3312 3313

			/* can't set/change the rt policy */
			if (policy != p->policy && !rlim_rtprio)
				return -EPERM;

			/* can't increase priority */
3314 3315
			if (attr->sched_priority > p->rt_priority &&
			    attr->sched_priority > rlim_rtprio)
3316 3317
				return -EPERM;
		}
3318

		 /*
		  * Can't set/change SCHED_DEADLINE policy at all for now
		  * (safest behavior); in the future we would like to allow
		  * unprivileged DL tasks to increase their relative deadline
		  * or reduce their runtime (both ways reducing utilization)
		  */
		if (dl_policy(policy))
			return -EPERM;

I
Ingo Molnar 已提交
3328
		/*
3329 3330
		 * Treat SCHED_IDLE as nice 20. Only allow a switch to
		 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
I
Ingo Molnar 已提交
3331
		 */
3332
		if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
3333
			if (!can_nice(p, task_nice(p)))
3334 3335
				return -EPERM;
		}
3336

3337
		/* can't change other user's priorities */
3338
		if (!check_same_owner(p))
3339
			return -EPERM;
3340 3341 3342 3343

		/* Normal users shall not reset the sched_reset_on_fork flag */
		if (p->sched_reset_on_fork && !reset_on_fork)
			return -EPERM;
3344
	}
L
Linus Torvalds 已提交
3345

3346
	if (user) {
3347
		retval = security_task_setscheduler(p);
3348 3349 3350 3351
		if (retval)
			return retval;
	}

3352 3353 3354
	/*
	 * make sure no PI-waiters arrive (or leave) while we are
	 * changing the priority of the task:
3355
	 *
L
Lucas De Marchi 已提交
3356
	 * To be able to change p->policy safely, the appropriate
L
Linus Torvalds 已提交
3357 3358
	 * runqueue lock must be held.
	 */
3359
	rq = task_rq_lock(p, &flags);
3360

3361 3362 3363 3364
	/*
	 * Changing the policy of the stop threads its a very bad idea
	 */
	if (p == rq->stop) {
3365
		task_rq_unlock(rq, p, &flags);
3366 3367 3368
		return -EINVAL;
	}

3369
	/*
3370 3371
	 * If not changing anything there's no need to proceed further,
	 * but store a possible modification of reset_on_fork.
3372
	 */
3373
	if (unlikely(policy == p->policy)) {
3374
		if (fair_policy(policy) && attr->sched_nice != task_nice(p))
3375 3376 3377
			goto change;
		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
			goto change;
3378 3379
		if (dl_policy(policy))
			goto change;
3380

3381
		p->sched_reset_on_fork = reset_on_fork;
3382
		task_rq_unlock(rq, p, &flags);
3383 3384
		return 0;
	}
3385
change:
3386

3387
	if (user) {
3388
#ifdef CONFIG_RT_GROUP_SCHED
		/*
		 * Do not allow realtime tasks into groups that have no runtime
		 * assigned.
		 */
		if (rt_bandwidth_enabled() && rt_policy(policy) &&
3394 3395
				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
				!task_group_is_autogroup(task_group(p))) {
3396
			task_rq_unlock(rq, p, &flags);
3397 3398 3399
			return -EPERM;
		}
#endif
#ifdef CONFIG_SMP
		if (dl_bandwidth_enabled() && dl_policy(policy)) {
			cpumask_t *span = rq->rd->span;

			/*
			 * Don't allow tasks with an affinity mask smaller than
			 * the entire root_domain to become SCHED_DEADLINE. We
			 * will also fail if there's no bandwidth available.
			 */
3409 3410
			if (!cpumask_subset(span, &p->cpus_allowed) ||
			    rq->rd->dl_bw.bw == 0) {
3411 3412 3413 3414 3415 3416
				task_rq_unlock(rq, p, &flags);
				return -EPERM;
			}
		}
#endif
	}
3417

L
Linus Torvalds 已提交
3418 3419 3420
	/* recheck policy now with rq lock held */
	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
		policy = oldpolicy = -1;
3421
		task_rq_unlock(rq, p, &flags);
L
Linus Torvalds 已提交
3422 3423
		goto recheck;
	}
3424 3425 3426 3427 3428 3429

	/*
	 * If setscheduling to SCHED_DEADLINE (or changing the parameters
	 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
	 * is available.
	 */
3430
	if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) {
3431 3432 3433 3434
		task_rq_unlock(rq, p, &flags);
		return -EBUSY;
	}

	p->sched_reset_on_fork = reset_on_fork;
	oldprio = p->prio;

	/*
	 * Special case for priority boosted tasks.
	 *
	 * If the new priority is lower or equal (user space view)
	 * than the current (boosted) priority, we just store the new
	 * normal parameters and do not touch the scheduler class and
	 * the runqueue. This will be done when the task deboost
	 * itself.
	 */
	if (rt_mutex_check_prio(p, newprio)) {
		__setscheduler_params(p, attr);
		task_rq_unlock(rq, p, &flags);
		return 0;
	}

P
Peter Zijlstra 已提交
3453
	on_rq = p->on_rq;
3454
	running = task_current(rq, p);
3455
	if (on_rq)
3456
		dequeue_task(rq, p, 0);
3457 3458
	if (running)
		p->sched_class->put_prev_task(rq, p);
3459

3460
	prev_class = p->sched_class;
3461
	__setscheduler(rq, p, attr);
3462

3463 3464
	if (running)
		p->sched_class->set_curr_task(rq);
3465 3466 3467 3468 3469 3470 3471
	if (on_rq) {
		/*
		 * We enqueue to tail when the priority of a task is
		 * increased (user space view).
		 */
		enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0);
	}
3472

P
Peter Zijlstra 已提交
3473
	check_class_changed(rq, p, prev_class, oldprio);
3474
	task_rq_unlock(rq, p, &flags);
3475

3476 3477
	rt_mutex_adjust_pi(p);

L
Linus Torvalds 已提交
3478 3479
	return 0;
}
3480

static int _sched_setscheduler(struct task_struct *p, int policy,
			       const struct sched_param *param, bool check)
{
	struct sched_attr attr = {
		.sched_policy   = policy,
		.sched_priority = param->sched_priority,
		.sched_nice	= PRIO_TO_NICE(p->static_prio),
	};

	/*
	 * Fixup the legacy SCHED_RESET_ON_FORK hack
	 */
	if (policy & SCHED_RESET_ON_FORK) {
		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
		policy &= ~SCHED_RESET_ON_FORK;
		attr.sched_policy = policy;
	}

	return __sched_setscheduler(p, &attr, check);
}
/**
 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
3507 3508
 * Return: 0 on success. An error code otherwise.
 *
3509 3510 3511
 * NOTE that the task may be already dead.
 */
int sched_setscheduler(struct task_struct *p, int policy,
3512
		       const struct sched_param *param)
3513
{
3514
	return _sched_setscheduler(p, policy, param, true);
3515
}
L
Linus Torvalds 已提交
3516 3517
EXPORT_SYMBOL_GPL(sched_setscheduler);

3518 3519 3520 3521 3522 3523
int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
{
	return __sched_setscheduler(p, attr, true);
}
EXPORT_SYMBOL_GPL(sched_setattr);

/**
 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Just like sched_setscheduler, only don't bother checking if the
 * current context has permission.  For example, this is needed in
 * stop_machine(): we create temporary high priority worker threads,
 * but our caller might not have that capability.
3534 3535
 *
 * Return: 0 on success. An error code otherwise.
3536 3537
 */
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
3538
			       const struct sched_param *param)
3539
{
3540
	return _sched_setscheduler(p, policy, param, false);
3541 3542
}

I
Ingo Molnar 已提交
3543 3544
static int
do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
L
Linus Torvalds 已提交
3545 3546 3547
{
	struct sched_param lparam;
	struct task_struct *p;
3548
	int retval;
L
Linus Torvalds 已提交
3549 3550 3551 3552 3553

	if (!param || pid < 0)
		return -EINVAL;
	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
		return -EFAULT;
3554 3555 3556

	rcu_read_lock();
	retval = -ESRCH;
L
Linus Torvalds 已提交
3557
	p = find_process_by_pid(pid);
3558 3559 3560
	if (p != NULL)
		retval = sched_setscheduler(p, policy, &lparam);
	rcu_read_unlock();
3561

L
Linus Torvalds 已提交
3562 3563 3564
	return retval;
}

/*
 * Mimics kernel/events/core.c perf_copy_attr().
 */
static int sched_copy_attr(struct sched_attr __user *uattr,
			   struct sched_attr *attr)
{
	u32 size;
	int ret;

	if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0))
		return -EFAULT;

	/*
	 * zero the full structure, so that a short copy will be nice.
	 */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	if (size > PAGE_SIZE)	/* silly large */
		goto err_size;

	if (!size)		/* abi compat */
		size = SCHED_ATTR_SIZE_VER0;

	if (size < SCHED_ATTR_SIZE_VER0)
		goto err_size;

	/*
	 * If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we dont know about yet.
	 */
	if (size > sizeof(*attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(*attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			ret = get_user(val, addr);
			if (ret)
				return ret;
			if (val)
				goto err_size;
		}
		size = sizeof(*attr);
	}

	ret = copy_from_user(attr, uattr, size);
	if (ret)
		return -EFAULT;

	/*
	 * XXX: do we want to be lenient like existing syscalls; or do we want
	 * to be strict and return an error on out-of-bounds values?
	 */
3627
	attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
3628 3629 3630 3631 3632 3633 3634 3635 3636 3637

out:
	return ret;

err_size:
	put_user(sizeof(*attr), &uattr->size);
	ret = -E2BIG;
	goto out;
}

L
Linus Torvalds 已提交
3638 3639 3640 3641 3642
/**
 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
 * @pid: the pid in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
3643 3644
 *
 * Return: 0 on success. An error code otherwise.
L
Linus Torvalds 已提交
3645
 */
3646 3647
SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
		struct sched_param __user *, param)
L
Linus Torvalds 已提交
3648
{
3649 3650 3651 3652
	/* negative values for policy are not valid */
	if (policy < 0)
		return -EINVAL;

L
Linus Torvalds 已提交
3653 3654 3655 3656 3657 3658 3659
	return do_sched_setscheduler(pid, policy, param);
}

/**
 * sys_sched_setparam - set/change the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the new RT priority.
3660 3661
 *
 * Return: 0 on success. An error code otherwise.
L
Linus Torvalds 已提交
3662
 */
3663
SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
L
Linus Torvalds 已提交
3664 3665 3666 3667
{
	return do_sched_setscheduler(pid, -1, param);
}

3668 3669 3670
/**
 * sys_sched_setattr - same as above, but with extended sched_attr
 * @pid: the pid in question.
J
Juri Lelli 已提交
3671
 * @uattr: structure containing the extended parameters.
3672
 * @flags: for future extension.
3673
 */
3674 3675
SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
			       unsigned int, flags)
3676 3677 3678 3679 3680
{
	struct sched_attr attr;
	struct task_struct *p;
	int retval;

3681
	if (!uattr || pid < 0 || flags)
3682 3683
		return -EINVAL;

3684 3685 3686
	retval = sched_copy_attr(uattr, &attr);
	if (retval)
		return retval;
3687

3688 3689 3690
	if (attr.sched_policy < 0)
		return -EINVAL;

3691 3692 3693 3694 3695 3696 3697 3698 3699 3700
	rcu_read_lock();
	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (p != NULL)
		retval = sched_setattr(p, &attr);
	rcu_read_unlock();

	return retval;
}

L
Linus Torvalds 已提交
3701 3702 3703
/**
 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
 * @pid: the pid in question.
3704 3705 3706
 *
 * Return: On success, the policy of the thread. Otherwise, a negative error
 * code.
L
Linus Torvalds 已提交
3707
 */
3708
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
L
Linus Torvalds 已提交
3709
{
3710
	struct task_struct *p;
3711
	int retval;
L
Linus Torvalds 已提交
3712 3713

	if (pid < 0)
3714
		return -EINVAL;
L
Linus Torvalds 已提交
3715 3716

	retval = -ESRCH;
3717
	rcu_read_lock();
L
Linus Torvalds 已提交
3718 3719 3720 3721
	p = find_process_by_pid(pid);
	if (p) {
		retval = security_task_getscheduler(p);
		if (!retval)
3722 3723
			retval = p->policy
				| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
L
Linus Torvalds 已提交
3724
	}
3725
	rcu_read_unlock();
L
Linus Torvalds 已提交
3726 3727 3728 3729
	return retval;
}

/**
3730
 * sys_sched_getparam - get the RT priority of a thread
L
Linus Torvalds 已提交
3731 3732
 * @pid: the pid in question.
 * @param: structure containing the RT priority.
3733 3734 3735
 *
 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
 * code.
L
Linus Torvalds 已提交
3736
 */
3737
SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
L
Linus Torvalds 已提交
3738
{
3739
	struct sched_param lp = { .sched_priority = 0 };
3740
	struct task_struct *p;
3741
	int retval;
L
Linus Torvalds 已提交
3742 3743

	if (!param || pid < 0)
3744
		return -EINVAL;
L
Linus Torvalds 已提交
3745

3746
	rcu_read_lock();
L
Linus Torvalds 已提交
3747 3748 3749 3750 3751 3752 3753 3754 3755
	p = find_process_by_pid(pid);
	retval = -ESRCH;
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

3756 3757
	if (task_has_rt_policy(p))
		lp.sched_priority = p->rt_priority;
3758
	rcu_read_unlock();
L
Linus Torvalds 已提交
3759 3760 3761 3762 3763 3764 3765 3766 3767

	/*
	 * This one might sleep, we cannot do it with a spinlock held ...
	 */
	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;

	return retval;

out_unlock:
3768
	rcu_read_unlock();
L
Linus Torvalds 已提交
3769 3770 3771
	return retval;
}

3772 3773 3774 3775 3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799 3800
static int sched_read_attr(struct sched_attr __user *uattr,
			   struct sched_attr *attr,
			   unsigned int usize)
{
	int ret;

	if (!access_ok(VERIFY_WRITE, uattr, usize))
		return -EFAULT;

	/*
	 * If we're handed a smaller struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. old
	 * user-space does not get uncomplete information.
	 */
	if (usize < sizeof(*attr)) {
		unsigned char *addr;
		unsigned char *end;

		addr = (void *)attr + usize;
		end  = (void *)attr + sizeof(*attr);

		for (; addr < end; addr++) {
			if (*addr)
				goto err_size;
		}

		attr->size = usize;
	}

3801
	ret = copy_to_user(uattr, attr, attr->size);
3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813
	if (ret)
		return -EFAULT;

out:
	return ret;

err_size:
	ret = -E2BIG;
	goto out;
}

/**
3814
 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
3815
 * @pid: the pid in question.
J
Juri Lelli 已提交
3816
 * @uattr: structure containing the extended parameters.
3817
 * @size: sizeof(attr) for fwd/bwd comp.
3818
 * @flags: for future extension.
3819
 */
3820 3821
SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
		unsigned int, size, unsigned int, flags)
3822 3823 3824 3825 3826 3827 3828 3829
{
	struct sched_attr attr = {
		.size = sizeof(struct sched_attr),
	};
	struct task_struct *p;
	int retval;

	if (!uattr || pid < 0 || size > PAGE_SIZE ||
3830
	    size < SCHED_ATTR_SIZE_VER0 || flags)
3831 3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 3843
		return -EINVAL;

	rcu_read_lock();
	p = find_process_by_pid(pid);
	retval = -ESRCH;
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	attr.sched_policy = p->policy;
3844 3845
	if (p->sched_reset_on_fork)
		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
3846 3847 3848
	if (task_has_dl_policy(p))
		__getparam_dl(p, &attr);
	else if (task_has_rt_policy(p))
3849 3850
		attr.sched_priority = p->rt_priority;
	else
3851
		attr.sched_nice = task_nice(p);
3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862

	rcu_read_unlock();

	retval = sched_read_attr(uattr, &attr, size);
	return retval;

out_unlock:
	rcu_read_unlock();
	return retval;
}

3863
long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
L
Linus Torvalds 已提交
3864
{
3865
	cpumask_var_t cpus_allowed, new_mask;
3866 3867
	struct task_struct *p;
	int retval;
L
Linus Torvalds 已提交
3868

3869
	rcu_read_lock();
L
Linus Torvalds 已提交
3870 3871 3872

	p = find_process_by_pid(pid);
	if (!p) {
3873
		rcu_read_unlock();
L
Linus Torvalds 已提交
3874 3875 3876
		return -ESRCH;
	}

3877
	/* Prevent p going away */
L
Linus Torvalds 已提交
3878
	get_task_struct(p);
3879
	rcu_read_unlock();
L
Linus Torvalds 已提交
3880

3881 3882 3883 3884
	if (p->flags & PF_NO_SETAFFINITY) {
		retval = -EINVAL;
		goto out_put_task;
	}
3885 3886 3887 3888 3889 3890 3891 3892
	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_put_task;
	}
	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}
L
Linus Torvalds 已提交
3893
	retval = -EPERM;
E
Eric W. Biederman 已提交
3894 3895 3896 3897 3898 3899 3900 3901
	if (!check_same_owner(p)) {
		rcu_read_lock();
		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
			rcu_read_unlock();
			goto out_unlock;
		}
		rcu_read_unlock();
	}
L
Linus Torvalds 已提交
3902

3903
	retval = security_task_setscheduler(p);
3904 3905 3906
	if (retval)
		goto out_unlock;

3907 3908 3909 3910

	cpuset_cpus_allowed(p, cpus_allowed);
	cpumask_and(new_mask, in_mask, cpus_allowed);

3911 3912 3913 3914 3915 3916 3917 3918 3919 3920
	/*
	 * Since bandwidth control happens on root_domain basis,
	 * if admission test is enabled, we only admit -deadline
	 * tasks allowed to run on all the CPUs in the task's
	 * root_domain.
	 */
#ifdef CONFIG_SMP
	if (task_has_dl_policy(p)) {
		const struct cpumask *span = task_rq(p)->rd->span;

3921
		if (dl_bandwidth_enabled() && !cpumask_subset(span, new_mask)) {
3922 3923 3924 3925 3926
			retval = -EBUSY;
			goto out_unlock;
		}
	}
#endif
P
Peter Zijlstra 已提交
3927
again:
3928
	retval = set_cpus_allowed_ptr(p, new_mask);
L
Linus Torvalds 已提交
3929

P
Paul Menage 已提交
3930
	if (!retval) {
3931 3932
		cpuset_cpus_allowed(p, cpus_allowed);
		if (!cpumask_subset(new_mask, cpus_allowed)) {
P
Paul Menage 已提交
3933 3934 3935 3936 3937
			/*
			 * We must have raced with a concurrent cpuset
			 * update. Just reset the cpus_allowed to the
			 * cpuset's cpus_allowed
			 */
3938
			cpumask_copy(new_mask, cpus_allowed);
P
Paul Menage 已提交
3939 3940 3941
			goto again;
		}
	}
L
Linus Torvalds 已提交
3942
out_unlock:
3943 3944 3945 3946
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
out_put_task:
L
Linus Torvalds 已提交
3947 3948 3949 3950 3951
	put_task_struct(p);
	return retval;
}

static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
3952
			     struct cpumask *new_mask)
L
Linus Torvalds 已提交
3953
{
3954 3955 3956 3957 3958
	if (len < cpumask_size())
		cpumask_clear(new_mask);
	else if (len > cpumask_size())
		len = cpumask_size();

L
Linus Torvalds 已提交
3959 3960 3961 3962 3963 3964 3965 3966
	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
}

/**
 * sys_sched_setaffinity - set the cpu affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to the new cpu mask
3967 3968
 *
 * Return: 0 on success. An error code otherwise.
L
Linus Torvalds 已提交
3969
 */
3970 3971
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
L
Linus Torvalds 已提交
3972
{
3973
	cpumask_var_t new_mask;
L
Linus Torvalds 已提交
3974 3975
	int retval;

3976 3977
	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;
L
Linus Torvalds 已提交
3978

3979 3980 3981 3982 3983
	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
	if (retval == 0)
		retval = sched_setaffinity(pid, new_mask);
	free_cpumask_var(new_mask);
	return retval;
L
Linus Torvalds 已提交
3984 3985
}

3986
long sched_getaffinity(pid_t pid, struct cpumask *mask)
L
Linus Torvalds 已提交
3987
{
3988
	struct task_struct *p;
3989
	unsigned long flags;
L
Linus Torvalds 已提交
3990 3991
	int retval;

3992
	rcu_read_lock();
L
Linus Torvalds 已提交
3993 3994 3995 3996 3997 3998

	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

3999 4000 4001 4002
	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

4003
	raw_spin_lock_irqsave(&p->pi_lock, flags);
4004
	cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
4005
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
L
Linus Torvalds 已提交
4006 4007

out_unlock:
4008
	rcu_read_unlock();
L
Linus Torvalds 已提交
4009

4010
	return retval;
L
Linus Torvalds 已提交
4011 4012 4013 4014 4015 4016 4017
}

/**
 * sys_sched_getaffinity - get the cpu affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to hold the current cpu mask
4018 4019
 *
 * Return: 0 on success. An error code otherwise.
L
Linus Torvalds 已提交
4020
 */
4021 4022
SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
L
Linus Torvalds 已提交
4023 4024
{
	int ret;
4025
	cpumask_var_t mask;
L
Linus Torvalds 已提交
4026

A
Anton Blanchard 已提交
4027
	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
4028 4029
		return -EINVAL;
	if (len & (sizeof(unsigned long)-1))
L
Linus Torvalds 已提交
4030 4031
		return -EINVAL;

4032 4033
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
L
Linus Torvalds 已提交
4034

4035 4036
	ret = sched_getaffinity(pid, mask);
	if (ret == 0) {
4037
		size_t retlen = min_t(size_t, len, cpumask_size());
4038 4039

		if (copy_to_user(user_mask_ptr, mask, retlen))
4040 4041
			ret = -EFAULT;
		else
4042
			ret = retlen;
4043 4044
	}
	free_cpumask_var(mask);
L
Linus Torvalds 已提交
4045

4046
	return ret;
L
Linus Torvalds 已提交
4047 4048 4049 4050 4051
}

/**
 * sys_sched_yield - yield the current processor to other threads.
 *
I
Ingo Molnar 已提交
4052 4053
 * This function yields the current CPU to other tasks. If there are no
 * other threads running on this CPU then this function will return.
4054 4055
 *
 * Return: 0.
L
Linus Torvalds 已提交
4056
 */
4057
SYSCALL_DEFINE0(sched_yield)
L
Linus Torvalds 已提交
4058
{
4059
	struct rq *rq = this_rq_lock();
L
Linus Torvalds 已提交
4060

4061
	schedstat_inc(rq, yld_count);
4062
	current->sched_class->yield_task(rq);
L
Linus Torvalds 已提交
4063 4064 4065 4066 4067 4068

	/*
	 * Since we are going to call schedule() anyway, there's
	 * no need to preempt or enable interrupts:
	 */
	__release(rq->lock);
4069
	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
4070
	do_raw_spin_unlock(&rq->lock);
4071
	sched_preempt_enable_no_resched();
L
Linus Torvalds 已提交
4072 4073 4074 4075 4076 4077

	schedule();

	return 0;
}

A
Andrew Morton 已提交
4078
static void __cond_resched(void)
L
Linus Torvalds 已提交
4079
{
4080
	__preempt_count_add(PREEMPT_ACTIVE);
4081
	__schedule();
4082
	__preempt_count_sub(PREEMPT_ACTIVE);
L
Linus Torvalds 已提交
4083 4084
}

4085
int __sched _cond_resched(void)
L
Linus Torvalds 已提交
4086
{
P
Peter Zijlstra 已提交
4087
	if (should_resched()) {
L
Linus Torvalds 已提交
4088 4089 4090 4091 4092
		__cond_resched();
		return 1;
	}
	return 0;
}
4093
EXPORT_SYMBOL(_cond_resched);
L
Linus Torvalds 已提交
4094 4095

/*
4096
 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
L
Linus Torvalds 已提交
4097 4098
 * call schedule, and on return reacquire the lock.
 *
I
Ingo Molnar 已提交
4099
 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
L
Linus Torvalds 已提交
4100 4101 4102
 * operations here to prevent schedule() from being called twice (once via
 * spin_unlock(), once by hand).
 */
4103
int __cond_resched_lock(spinlock_t *lock)
L
Linus Torvalds 已提交
4104
{
P
Peter Zijlstra 已提交
4105
	int resched = should_resched();
J
Jan Kara 已提交
4106 4107
	int ret = 0;

4108 4109
	lockdep_assert_held(lock);

N
Nick Piggin 已提交
4110
	if (spin_needbreak(lock) || resched) {
L
Linus Torvalds 已提交
4111
		spin_unlock(lock);
P
Peter Zijlstra 已提交
4112
		if (resched)
N
Nick Piggin 已提交
4113 4114 4115
			__cond_resched();
		else
			cpu_relax();
J
Jan Kara 已提交
4116
		ret = 1;
L
Linus Torvalds 已提交
4117 4118
		spin_lock(lock);
	}
J
Jan Kara 已提交
4119
	return ret;
L
Linus Torvalds 已提交
4120
}
4121
EXPORT_SYMBOL(__cond_resched_lock);
L
Linus Torvalds 已提交
4122

4123
int __sched __cond_resched_softirq(void)
L
Linus Torvalds 已提交
4124 4125 4126
{
	BUG_ON(!in_softirq());

P
Peter Zijlstra 已提交
4127
	if (should_resched()) {
4128
		local_bh_enable();
L
Linus Torvalds 已提交
4129 4130 4131 4132 4133 4134
		__cond_resched();
		local_bh_disable();
		return 1;
	}
	return 0;
}
4135
EXPORT_SYMBOL(__cond_resched_softirq);
L
Linus Torvalds 已提交
4136 4137 4138 4139

/**
 * yield - yield the current processor to other threads.
 *
P
Peter Zijlstra 已提交
4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157
 * Do not ever use this function, there's a 99% chance you're doing it wrong.
 *
 * The scheduler is at all times free to pick the calling task as the most
 * eligible task to run, if removing the yield() call from your code breaks
 * it, its already broken.
 *
 * Typical broken usage is:
 *
 * while (!event)
 * 	yield();
 *
 * where one assumes that yield() will let 'the other' process run that will
 * make event true. If the current task is a SCHED_FIFO task that will never
 * happen. Never use yield() as a progress guarantee!!
 *
 * If you want to use yield() to wait for something, use wait_event().
 * If you want to use yield() to be 'nice' for others, use cond_resched().
 * If you still want to use yield(), do not!
L
Linus Torvalds 已提交
4158 4159 4160 4161 4162 4163 4164 4165
 */
void __sched yield(void)
{
	set_current_state(TASK_RUNNING);
	sys_sched_yield();
}
EXPORT_SYMBOL(yield);

4166 4167 4168 4169
/**
 * yield_to - yield the current processor to another thread in
 * your thread group, or accelerate that thread toward the
 * processor it's on.
R
Randy Dunlap 已提交
4170 4171
 * @p: target task
 * @preempt: whether task preemption is allowed or not
4172 4173 4174 4175
 *
 * It's the caller's job to ensure that the target task struct
 * can't go away on us before we can do any checks.
 *
4176
 * Return:
4177 4178 4179
 *	true (>0) if we indeed boosted the target task.
 *	false (0) if we failed to boost the target.
 *	-ESRCH if there's no task to yield to.
4180 4181 4182 4183 4184 4185
 */
bool __sched yield_to(struct task_struct *p, bool preempt)
{
	struct task_struct *curr = current;
	struct rq *rq, *p_rq;
	unsigned long flags;
4186
	int yielded = 0;
4187 4188 4189 4190 4191 4192

	local_irq_save(flags);
	rq = this_rq();

again:
	p_rq = task_rq(p);
4193 4194 4195 4196 4197 4198 4199 4200 4201
	/*
	 * If we're the only runnable task on the rq and target rq also
	 * has only one task, there's absolutely no point in yielding.
	 */
	if (rq->nr_running == 1 && p_rq->nr_running == 1) {
		yielded = -ESRCH;
		goto out_irq;
	}

4202
	double_rq_lock(rq, p_rq);
4203
	if (task_rq(p) != p_rq) {
4204 4205 4206 4207 4208
		double_rq_unlock(rq, p_rq);
		goto again;
	}

	if (!curr->sched_class->yield_to_task)
4209
		goto out_unlock;
4210 4211

	if (curr->sched_class != p->sched_class)
4212
		goto out_unlock;
4213 4214

	if (task_running(p_rq, p) || p->state)
4215
		goto out_unlock;
4216 4217

	yielded = curr->sched_class->yield_to_task(rq, p, preempt);
4218
	if (yielded) {
4219
		schedstat_inc(rq, yld_count);
4220 4221 4222 4223 4224 4225 4226
		/*
		 * Make p's CPU reschedule; pick_next_entity takes care of
		 * fairness.
		 */
		if (preempt && rq != p_rq)
			resched_task(p_rq->curr);
	}
4227

4228
out_unlock:
4229
	double_rq_unlock(rq, p_rq);
4230
out_irq:
4231 4232
	local_irq_restore(flags);

4233
	if (yielded > 0)
4234 4235 4236 4237 4238 4239
		schedule();

	return yielded;
}
EXPORT_SYMBOL_GPL(yield_to);

L
Linus Torvalds 已提交
4240
/*
I
Ingo Molnar 已提交
4241
 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
L
Linus Torvalds 已提交
4242 4243 4244 4245
 * that process accounting knows that this is a task in IO wait state.
 */
void __sched io_schedule(void)
{
4246
	struct rq *rq = raw_rq();
L
Linus Torvalds 已提交
4247

4248
	delayacct_blkio_start();
L
Linus Torvalds 已提交
4249
	atomic_inc(&rq->nr_iowait);
4250
	blk_flush_plug(current);
4251
	current->in_iowait = 1;
L
Linus Torvalds 已提交
4252
	schedule();
4253
	current->in_iowait = 0;
L
Linus Torvalds 已提交
4254
	atomic_dec(&rq->nr_iowait);
4255
	delayacct_blkio_end();
L
Linus Torvalds 已提交
4256 4257 4258 4259 4260
}
EXPORT_SYMBOL(io_schedule);

long __sched io_schedule_timeout(long timeout)
{
4261
	struct rq *rq = raw_rq();
L
Linus Torvalds 已提交
4262 4263
	long ret;

4264
	delayacct_blkio_start();
L
Linus Torvalds 已提交
4265
	atomic_inc(&rq->nr_iowait);
4266
	blk_flush_plug(current);
4267
	current->in_iowait = 1;
L
Linus Torvalds 已提交
4268
	ret = schedule_timeout(timeout);
4269
	current->in_iowait = 0;
L
Linus Torvalds 已提交
4270
	atomic_dec(&rq->nr_iowait);
4271
	delayacct_blkio_end();
L
Linus Torvalds 已提交
4272 4273 4274 4275 4276 4277 4278
	return ret;
}

/**
 * sys_sched_get_priority_max - return maximum RT priority.
 * @policy: scheduling class.
 *
4279 4280 4281
 * Return: On success, this syscall returns the maximum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
L
Linus Torvalds 已提交
4282
 */
4283
SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
L
Linus Torvalds 已提交
4284 4285 4286 4287 4288 4289 4290 4291
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = MAX_USER_RT_PRIO-1;
		break;
4292
	case SCHED_DEADLINE:
L
Linus Torvalds 已提交
4293
	case SCHED_NORMAL:
4294
	case SCHED_BATCH:
I
Ingo Molnar 已提交
4295
	case SCHED_IDLE:
L
Linus Torvalds 已提交
4296 4297 4298 4299 4300 4301 4302 4303 4304 4305
		ret = 0;
		break;
	}
	return ret;
}

/**
 * sys_sched_get_priority_min - return minimum RT priority.
 * @policy: scheduling class.
 *
4306 4307 4308
 * Return: On success, this syscall returns the minimum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
L
Linus Torvalds 已提交
4309
 */
4310
SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
L
Linus Torvalds 已提交
4311 4312 4313 4314 4315 4316 4317 4318
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = 1;
		break;
4319
	case SCHED_DEADLINE:
L
Linus Torvalds 已提交
4320
	case SCHED_NORMAL:
4321
	case SCHED_BATCH:
I
Ingo Molnar 已提交
4322
	case SCHED_IDLE:
L
Linus Torvalds 已提交
4323 4324 4325 4326 4327 4328 4329 4330 4331 4332 4333 4334
		ret = 0;
	}
	return ret;
}

/**
 * sys_sched_rr_get_interval - return the default timeslice of a process.
 * @pid: pid of the process.
 * @interval: userspace pointer to the timeslice value.
 *
 * this syscall writes the default timeslice value of a given process
 * into the user-space timespec buffer. A value of '0' means infinity.
4335 4336 4337
 *
 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
 * an error code.
L
Linus Torvalds 已提交
4338
 */
4339
SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
4340
		struct timespec __user *, interval)
L
Linus Torvalds 已提交
4341
{
4342
	struct task_struct *p;
D
Dmitry Adamushko 已提交
4343
	unsigned int time_slice;
4344 4345
	unsigned long flags;
	struct rq *rq;
4346
	int retval;
L
Linus Torvalds 已提交
4347 4348 4349
	struct timespec t;

	if (pid < 0)
4350
		return -EINVAL;
L
Linus Torvalds 已提交
4351 4352

	retval = -ESRCH;
4353
	rcu_read_lock();
L
Linus Torvalds 已提交
4354 4355 4356 4357 4358 4359 4360 4361
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

4362
	rq = task_rq_lock(p, &flags);
4363 4364 4365
	time_slice = 0;
	if (p->sched_class->get_rr_interval)
		time_slice = p->sched_class->get_rr_interval(rq, p);
4366
	task_rq_unlock(rq, p, &flags);
D
Dmitry Adamushko 已提交
4367

4368
	rcu_read_unlock();
D
Dmitry Adamushko 已提交
4369
	jiffies_to_timespec(time_slice, &t);
L
Linus Torvalds 已提交
4370 4371
	retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
	return retval;
4372

L
Linus Torvalds 已提交
4373
out_unlock:
4374
	rcu_read_unlock();
L
Linus Torvalds 已提交
4375 4376 4377
	return retval;
}

4378
static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
4379

4380
void sched_show_task(struct task_struct *p)
L
Linus Torvalds 已提交
4381 4382
{
	unsigned long free = 0;
4383
	int ppid;
4384
	unsigned state;
L
Linus Torvalds 已提交
4385 4386

	state = p->state ? __ffs(p->state) + 1 : 0;
4387
	printk(KERN_INFO "%-15.15s %c", p->comm,
4388
		state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
4389
#if BITS_PER_LONG == 32
L
Linus Torvalds 已提交
4390
	if (state == TASK_RUNNING)
P
Peter Zijlstra 已提交
4391
		printk(KERN_CONT " running  ");
L
Linus Torvalds 已提交
4392
	else
P
Peter Zijlstra 已提交
4393
		printk(KERN_CONT " %08lx ", thread_saved_pc(p));
L
Linus Torvalds 已提交
4394 4395
#else
	if (state == TASK_RUNNING)
P
Peter Zijlstra 已提交
4396
		printk(KERN_CONT "  running task    ");
L
Linus Torvalds 已提交
4397
	else
P
Peter Zijlstra 已提交
4398
		printk(KERN_CONT " %016lx ", thread_saved_pc(p));
L
Linus Torvalds 已提交
4399 4400
#endif
#ifdef CONFIG_DEBUG_STACK_USAGE
4401
	free = stack_not_used(p);
L
Linus Torvalds 已提交
4402
#endif
4403 4404 4405
	rcu_read_lock();
	ppid = task_pid_nr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
P
Peter Zijlstra 已提交
4406
	printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
4407
		task_pid_nr(p), ppid,
4408
		(unsigned long)task_thread_info(p)->flags);
L
Linus Torvalds 已提交
4409

4410
	print_worker_info(KERN_INFO, p);
4411
	show_stack(p, NULL);
L
Linus Torvalds 已提交
4412 4413
}

I
Ingo Molnar 已提交
4414
void show_state_filter(unsigned long state_filter)
L
Linus Torvalds 已提交
4415
{
4416
	struct task_struct *g, *p;
L
Linus Torvalds 已提交
4417

4418
#if BITS_PER_LONG == 32
P
Peter Zijlstra 已提交
4419 4420
	printk(KERN_INFO
		"  task                PC stack   pid father\n");
L
Linus Torvalds 已提交
4421
#else
P
Peter Zijlstra 已提交
4422 4423
	printk(KERN_INFO
		"  task                        PC stack   pid father\n");
L
Linus Torvalds 已提交
4424
#endif
4425
	rcu_read_lock();
L
Linus Torvalds 已提交
4426 4427 4428
	do_each_thread(g, p) {
		/*
		 * reset the NMI-timeout, listing all files on a slow
L
Lucas De Marchi 已提交
4429
		 * console might take a lot of time:
L
Linus Torvalds 已提交
4430 4431
		 */
		touch_nmi_watchdog();
I
Ingo Molnar 已提交
4432
		if (!state_filter || (p->state & state_filter))
4433
			sched_show_task(p);
L
Linus Torvalds 已提交
4434 4435
	} while_each_thread(g, p);

4436 4437
	touch_all_softlockup_watchdogs();

I
Ingo Molnar 已提交
4438 4439 4440
#ifdef CONFIG_SCHED_DEBUG
	sysrq_sched_debug_show();
#endif
4441
	rcu_read_unlock();
I
Ingo Molnar 已提交
4442 4443 4444
	/*
	 * Only show locks if all tasks are dumped:
	 */
4445
	if (!state_filter)
I
Ingo Molnar 已提交
4446
		debug_show_all_locks();
L
Linus Torvalds 已提交
4447 4448
}

4449
void init_idle_bootup_task(struct task_struct *idle)
I
Ingo Molnar 已提交
4450
{
I
Ingo Molnar 已提交
4451
	idle->sched_class = &idle_sched_class;
I
Ingo Molnar 已提交
4452 4453
}

4454 4455 4456 4457 4458 4459 4460 4461
/**
 * init_idle - set up an idle thread for a given CPU
 * @idle: task in question
 * @cpu: cpu the idle task belongs to
 *
 * NOTE: this function does not set the idle thread's NEED_RESCHED
 * flag, to make booting more robust.
 */
4462
void init_idle(struct task_struct *idle, int cpu)
L
Linus Torvalds 已提交
4463
{
4464
	struct rq *rq = cpu_rq(cpu);
L
Linus Torvalds 已提交
4465 4466
	unsigned long flags;

4467
	raw_spin_lock_irqsave(&rq->lock, flags);
4468

4469
	__sched_fork(0, idle);
4470
	idle->state = TASK_RUNNING;
I
Ingo Molnar 已提交
4471 4472
	idle->se.exec_start = sched_clock();

4473
	do_set_cpus_allowed(idle, cpumask_of(cpu));
4474 4475 4476 4477 4478 4479 4480 4481 4482 4483 4484
	/*
	 * We're having a chicken and egg problem, even though we are
	 * holding rq->lock, the cpu isn't yet set to this cpu so the
	 * lockdep check in task_group() will fail.
	 *
	 * Similar case to sched_fork(). / Alternatively we could
	 * use task_rq_lock() here and obtain the other rq->lock.
	 *
	 * Silence PROVE_RCU
	 */
	rcu_read_lock();
I
Ingo Molnar 已提交
4485
	__set_task_cpu(idle, cpu);
4486
	rcu_read_unlock();
L
Linus Torvalds 已提交
4487 4488

	rq->curr = rq->idle = idle;
4489
	idle->on_rq = 1;
P
Peter Zijlstra 已提交
4490 4491
#if defined(CONFIG_SMP)
	idle->on_cpu = 1;
4492
#endif
4493
	raw_spin_unlock_irqrestore(&rq->lock, flags);
L
Linus Torvalds 已提交
4494 4495

	/* Set the preempt count _outside_ the spinlocks! */
4496
	init_idle_preempt_count(idle, cpu);
4497

I
Ingo Molnar 已提交
4498 4499 4500 4501
	/*
	 * The idle tasks have their own, simple scheduling class:
	 */
	idle->sched_class = &idle_sched_class;
4502
	ftrace_graph_init_idle_task(idle, cpu);
4503
	vtime_init_idle(idle, cpu);
4504 4505 4506
#if defined(CONFIG_SMP)
	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
#endif
I
Ingo Molnar 已提交
4507 4508
}

L
Linus Torvalds 已提交
4509
#ifdef CONFIG_SMP
4510 4511 4512 4513
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
	if (p->sched_class && p->sched_class->set_cpus_allowed)
		p->sched_class->set_cpus_allowed(p, new_mask);
4514 4515

	cpumask_copy(&p->cpus_allowed, new_mask);
4516
	p->nr_cpus_allowed = cpumask_weight(new_mask);
4517 4518
}

L
Linus Torvalds 已提交
4519 4520 4521
/*
 * This is how migration works:
 *
4522 4523 4524 4525 4526 4527
 * 1) we invoke migration_cpu_stop() on the target CPU using
 *    stop_one_cpu().
 * 2) stopper starts to run (implicitly forcing the migrated thread
 *    off the CPU)
 * 3) it checks whether the migrated task is still in the wrong runqueue.
 * 4) if it's in the wrong runqueue then the migration thread removes
L
Linus Torvalds 已提交
4528
 *    it and puts it into the right queue.
4529 4530
 * 5) stopper completes and stop_one_cpu() returns and the migration
 *    is done.
L
Linus Torvalds 已提交
4531 4532 4533 4534 4535 4536 4537 4538
 */

/*
 * Change a given task's CPU affinity. Migrate the thread to a
 * proper CPU and schedule it away if the CPU it's executing on
 * is removed from the allowed bitmask.
 *
 * NOTE: the caller must have a valid reference to the task, the
I
Ingo Molnar 已提交
4539
 * task must not exit() & deallocate itself prematurely. The
L
Linus Torvalds 已提交
4540 4541
 * call is not atomic; no spinlocks may be held.
 */
4542
int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
L
Linus Torvalds 已提交
4543 4544
{
	unsigned long flags;
4545
	struct rq *rq;
4546
	unsigned int dest_cpu;
4547
	int ret = 0;
L
Linus Torvalds 已提交
4548 4549

	rq = task_rq_lock(p, &flags);
4550

4551 4552 4553
	if (cpumask_equal(&p->cpus_allowed, new_mask))
		goto out;

4554
	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
L
Linus Torvalds 已提交
4555 4556 4557 4558
		ret = -EINVAL;
		goto out;
	}

4559
	do_set_cpus_allowed(p, new_mask);
4560

L
Linus Torvalds 已提交
4561
	/* Can the task run on the task's current CPU? If so, we're done */
4562
	if (cpumask_test_cpu(task_cpu(p), new_mask))
L
Linus Torvalds 已提交
4563 4564
		goto out;

4565
	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
4566
	if (p->on_rq) {
4567
		struct migration_arg arg = { p, dest_cpu };
L
Linus Torvalds 已提交
4568
		/* Need help from migration thread: drop lock and wait. */
4569
		task_rq_unlock(rq, p, &flags);
4570
		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
L
Linus Torvalds 已提交
4571 4572 4573 4574
		tlb_migrate_finish(p->mm);
		return 0;
	}
out:
4575
	task_rq_unlock(rq, p, &flags);
4576

L
Linus Torvalds 已提交
4577 4578
	return ret;
}
4579
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
L
Linus Torvalds 已提交
4580 4581

/*
I
Ingo Molnar 已提交
4582
 * Move (not current) task off this cpu, onto dest cpu. We're doing
L
Linus Torvalds 已提交
4583 4584 4585 4586 4587 4588
 * this because either it can't run here any more (set_cpus_allowed()
 * away from this CPU, or CPU going down), or because we're
 * attempting to rebalance this task on exec (sched_exec).
 *
 * So we race with normal scheduler movements, but that's OK, as long
 * as the task is no longer on this CPU.
4589 4590
 *
 * Returns non-zero if task was successfully migrated.
L
Linus Torvalds 已提交
4591
 */
4592
static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
L
Linus Torvalds 已提交
4593
{
4594
	struct rq *rq_dest, *rq_src;
4595
	int ret = 0;
L
Linus Torvalds 已提交
4596

4597
	if (unlikely(!cpu_active(dest_cpu)))
4598
		return ret;
L
Linus Torvalds 已提交
4599 4600 4601 4602

	rq_src = cpu_rq(src_cpu);
	rq_dest = cpu_rq(dest_cpu);

4603
	raw_spin_lock(&p->pi_lock);
L
Linus Torvalds 已提交
4604 4605 4606
	double_rq_lock(rq_src, rq_dest);
	/* Already moved. */
	if (task_cpu(p) != src_cpu)
L
Linus Torvalds 已提交
4607
		goto done;
L
Linus Torvalds 已提交
4608
	/* Affinity changed (again). */
4609
	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
L
Linus Torvalds 已提交
4610
		goto fail;
L
Linus Torvalds 已提交
4611

4612 4613 4614 4615
	/*
	 * If we're not on a rq, the next wake-up will ensure we're
	 * placed properly.
	 */
P
Peter Zijlstra 已提交
4616
	if (p->on_rq) {
4617
		dequeue_task(rq_src, p, 0);
4618
		set_task_cpu(p, dest_cpu);
4619
		enqueue_task(rq_dest, p, 0);
4620
		check_preempt_curr(rq_dest, p, 0);
L
Linus Torvalds 已提交
4621
	}
L
Linus Torvalds 已提交
4622
done:
4623
	ret = 1;
L
Linus Torvalds 已提交
4624
fail:
L
Linus Torvalds 已提交
4625
	double_rq_unlock(rq_src, rq_dest);
4626
	raw_spin_unlock(&p->pi_lock);
4627
	return ret;
L
Linus Torvalds 已提交
4628 4629
}

4630 4631 4632 4633 4634 4635 4636 4637 4638 4639 4640 4641 4642 4643 4644
#ifdef CONFIG_NUMA_BALANCING
/* Migrate current task p to target_cpu */
int migrate_task_to(struct task_struct *p, int target_cpu)
{
	struct migration_arg arg = { p, target_cpu };
	int curr_cpu = task_cpu(p);

	if (curr_cpu == target_cpu)
		return 0;

	if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p)))
		return -EINVAL;

	/* TODO: This is not properly updating schedstats */

4645
	trace_sched_move_numa(p, curr_cpu, target_cpu);
4646 4647
	return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
}
4648 4649 4650 4651 4652 4653 4654 4655 4656 4657 4658 4659 4660 4661 4662 4663 4664 4665 4666 4667 4668 4669 4670 4671 4672 4673 4674 4675

/*
 * Requeue a task on a given node and accurately track the number of NUMA
 * tasks on the runqueues
 */
void sched_setnuma(struct task_struct *p, int nid)
{
	struct rq *rq;
	unsigned long flags;
	bool on_rq, running;

	rq = task_rq_lock(p, &flags);
	on_rq = p->on_rq;
	running = task_current(rq, p);

	if (on_rq)
		dequeue_task(rq, p, 0);
	if (running)
		p->sched_class->put_prev_task(rq, p);

	p->numa_preferred_nid = nid;

	if (running)
		p->sched_class->set_curr_task(rq);
	if (on_rq)
		enqueue_task(rq, p, 0);
	task_rq_unlock(rq, p, &flags);
}
4676 4677
#endif

L
Linus Torvalds 已提交
4678
/*
4679 4680 4681
 * migration_cpu_stop - this will be executed by a highprio stopper thread
 * and performs thread migration by bumping thread off CPU then
 * 'pushing' onto another runqueue.
L
Linus Torvalds 已提交
4682
 */
4683
static int migration_cpu_stop(void *data)
L
Linus Torvalds 已提交
4684
{
4685
	struct migration_arg *arg = data;
4686

4687 4688 4689 4690
	/*
	 * The original target cpu might have gone down and we might
	 * be on another cpu but it doesn't matter.
	 */
4691
	local_irq_disable();
4692
	__migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
4693
	local_irq_enable();
L
Linus Torvalds 已提交
4694
	return 0;
4695 4696
}

L
Linus Torvalds 已提交
4697
#ifdef CONFIG_HOTPLUG_CPU
4698

4699
/*
4700 4701
 * Ensures that the idle task is using init_mm right before its cpu goes
 * offline.
4702
 */
4703
void idle_task_exit(void)
L
Linus Torvalds 已提交
4704
{
4705
	struct mm_struct *mm = current->active_mm;
4706

4707
	BUG_ON(cpu_online(smp_processor_id()));
4708

4709
	if (mm != &init_mm) {
4710
		switch_mm(mm, &init_mm, current);
4711 4712
		finish_arch_post_lock_switch();
	}
4713
	mmdrop(mm);
L
Linus Torvalds 已提交
4714 4715 4716
}

/*
4717 4718 4719 4720 4721
 * Since this CPU is going 'away' for a while, fold any nr_active delta
 * we might have. Assumes we're called after migrate_tasks() so that the
 * nr_active count is stable.
 *
 * Also see the comment "Global load-average calculations".
L
Linus Torvalds 已提交
4722
 */
4723
static void calc_load_migrate(struct rq *rq)
L
Linus Torvalds 已提交
4724
{
4725 4726 4727
	long delta = calc_load_fold_active(rq);
	if (delta)
		atomic_long_add(delta, &calc_load_tasks);
L
Linus Torvalds 已提交
4728 4729
}

4730 4731 4732 4733 4734 4735 4736 4737 4738 4739 4740 4741 4742 4743 4744 4745
static void put_prev_task_fake(struct rq *rq, struct task_struct *prev)
{
}

static const struct sched_class fake_sched_class = {
	.put_prev_task = put_prev_task_fake,
};

static struct task_struct fake_task = {
	/*
	 * Avoid pull_{rt,dl}_task()
	 */
	.prio = MAX_PRIO + 1,
	.sched_class = &fake_sched_class,
};

4746
/*
4747 4748 4749 4750 4751 4752
 * Migrate all tasks from the rq, sleeping tasks will be migrated by
 * try_to_wake_up()->select_task_rq().
 *
 * Called with rq->lock held even though we'er in stop_machine() and
 * there's no concurrency possible, we hold the required locks anyway
 * because of lock validation efforts.
L
Linus Torvalds 已提交
4753
 */
4754
static void migrate_tasks(unsigned int dead_cpu)
L
Linus Torvalds 已提交
4755
{
4756
	struct rq *rq = cpu_rq(dead_cpu);
4757 4758
	struct task_struct *next, *stop = rq->stop;
	int dest_cpu;
L
Linus Torvalds 已提交
4759 4760

	/*
4761 4762 4763 4764 4765 4766 4767
	 * Fudge the rq selection such that the below task selection loop
	 * doesn't get stuck on the currently eligible stop task.
	 *
	 * We're currently inside stop_machine() and the rq is either stuck
	 * in the stop_machine_cpu_stop() loop, or we're executing this code,
	 * either way we should never end up calling schedule() until we're
	 * done here.
L
Linus Torvalds 已提交
4768
	 */
4769
	rq->stop = NULL;
4770

4771 4772 4773 4774 4775 4776 4777
	/*
	 * put_prev_task() and pick_next_task() sched
	 * class method both need to have an up-to-date
	 * value of rq->clock[_task]
	 */
	update_rq_clock(rq);

I
Ingo Molnar 已提交
4778
	for ( ; ; ) {
4779 4780 4781 4782 4783
		/*
		 * There's this thread running, bail when that's the only
		 * remaining thread.
		 */
		if (rq->nr_running == 1)
I
Ingo Molnar 已提交
4784
			break;
4785

4786
		next = pick_next_task(rq, &fake_task);
4787
		BUG_ON(!next);
D
Dmitry Adamushko 已提交
4788
		next->sched_class->put_prev_task(rq, next);
4789

4790 4791 4792 4793 4794 4795 4796
		/* Find suitable destination for @next, with force if needed. */
		dest_cpu = select_fallback_rq(dead_cpu, next);
		raw_spin_unlock(&rq->lock);

		__migrate_task(next, dead_cpu, dest_cpu);

		raw_spin_lock(&rq->lock);
L
Linus Torvalds 已提交
4797
	}
4798

4799
	rq->stop = stop;
4800
}
4801

L
Linus Torvalds 已提交
4802 4803
#endif /* CONFIG_HOTPLUG_CPU */

4804 4805 4806
#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)

static struct ctl_table sd_ctl_dir[] = {
4807 4808
	{
		.procname	= "sched_domain",
4809
		.mode		= 0555,
4810
	},
4811
	{}
4812 4813 4814
};

static struct ctl_table sd_ctl_root[] = {
4815 4816
	{
		.procname	= "kernel",
4817
		.mode		= 0555,
4818 4819
		.child		= sd_ctl_dir,
	},
4820
	{}
4821 4822 4823 4824 4825
};

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
	struct ctl_table *entry =
4826
		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
4827 4828 4829 4830

	return entry;
}

4831 4832
static void sd_free_ctl_entry(struct ctl_table **tablep)
{
4833
	struct ctl_table *entry;
4834

4835 4836 4837
	/*
	 * In the intermediate directories, both the child directory and
	 * procname are dynamically allocated and could fail but the mode
I
Ingo Molnar 已提交
4838
	 * will always be set. In the lowest directory the names are
4839 4840 4841
	 * static strings and all have proc handlers.
	 */
	for (entry = *tablep; entry->mode; entry++) {
4842 4843
		if (entry->child)
			sd_free_ctl_entry(&entry->child);
4844 4845 4846
		if (entry->proc_handler == NULL)
			kfree(entry->procname);
	}
4847 4848 4849 4850 4851

	kfree(*tablep);
	*tablep = NULL;
}

4852
static int min_load_idx = 0;
4853
static int max_load_idx = CPU_LOAD_IDX_MAX-1;
4854

4855
static void
4856
set_table_entry(struct ctl_table *entry,
4857
		const char *procname, void *data, int maxlen,
4858 4859
		umode_t mode, proc_handler *proc_handler,
		bool load_idx)
4860 4861 4862 4863 4864 4865
{
	entry->procname = procname;
	entry->data = data;
	entry->maxlen = maxlen;
	entry->mode = mode;
	entry->proc_handler = proc_handler;
4866 4867 4868 4869 4870

	if (load_idx) {
		entry->extra1 = &min_load_idx;
		entry->extra2 = &max_load_idx;
	}
4871 4872 4873 4874 4875
}

static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
4876
	struct ctl_table *table = sd_alloc_ctl_entry(14);
4877

4878 4879 4880
	if (table == NULL)
		return NULL;

4881
	set_table_entry(&table[0], "min_interval", &sd->min_interval,
4882
		sizeof(long), 0644, proc_doulongvec_minmax, false);
4883
	set_table_entry(&table[1], "max_interval", &sd->max_interval,
4884
		sizeof(long), 0644, proc_doulongvec_minmax, false);
4885
	set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
4886
		sizeof(int), 0644, proc_dointvec_minmax, true);
4887
	set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
4888
		sizeof(int), 0644, proc_dointvec_minmax, true);
4889
	set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
4890
		sizeof(int), 0644, proc_dointvec_minmax, true);
4891
	set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
4892
		sizeof(int), 0644, proc_dointvec_minmax, true);
4893
	set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
4894
		sizeof(int), 0644, proc_dointvec_minmax, true);
4895
	set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
4896
		sizeof(int), 0644, proc_dointvec_minmax, false);
4897
	set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
4898
		sizeof(int), 0644, proc_dointvec_minmax, false);
4899
	set_table_entry(&table[9], "cache_nice_tries",
4900
		&sd->cache_nice_tries,
4901
		sizeof(int), 0644, proc_dointvec_minmax, false);
4902
	set_table_entry(&table[10], "flags", &sd->flags,
4903
		sizeof(int), 0644, proc_dointvec_minmax, false);
4904 4905 4906 4907
	set_table_entry(&table[11], "max_newidle_lb_cost",
		&sd->max_newidle_lb_cost,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[12], "name", sd->name,
4908
		CORENAME_MAX_SIZE, 0444, proc_dostring, false);
4909
	/* &table[13] is terminator */
4910 4911 4912 4913

	return table;
}

4914
static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
4915 4916 4917 4918 4919 4920 4921 4922 4923
{
	struct ctl_table *entry, *table;
	struct sched_domain *sd;
	int domain_num = 0, i;
	char buf[32];

	for_each_domain(cpu, sd)
		domain_num++;
	entry = table = sd_alloc_ctl_entry(domain_num + 1);
4924 4925
	if (table == NULL)
		return NULL;
4926 4927 4928 4929 4930

	i = 0;
	for_each_domain(cpu, sd) {
		snprintf(buf, 32, "domain%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
4931
		entry->mode = 0555;
4932 4933 4934 4935 4936 4937 4938 4939
		entry->child = sd_alloc_ctl_domain_table(sd);
		entry++;
		i++;
	}
	return table;
}

static struct ctl_table_header *sd_sysctl_header;
4940
static void register_sched_domain_sysctl(void)
4941
{
4942
	int i, cpu_num = num_possible_cpus();
4943 4944 4945
	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
	char buf[32];

4946 4947 4948
	WARN_ON(sd_ctl_dir[0].child);
	sd_ctl_dir[0].child = entry;

4949 4950 4951
	if (entry == NULL)
		return;

4952
	for_each_possible_cpu(i) {
4953 4954
		snprintf(buf, 32, "cpu%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
4955
		entry->mode = 0555;
4956
		entry->child = sd_alloc_ctl_cpu_table(i);
4957
		entry++;
4958
	}
4959 4960

	WARN_ON(sd_sysctl_header);
4961 4962
	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}
4963

4964
/* may be called multiple times per register */
4965 4966
static void unregister_sched_domain_sysctl(void)
{
4967 4968
	if (sd_sysctl_header)
		unregister_sysctl_table(sd_sysctl_header);
4969
	sd_sysctl_header = NULL;
4970 4971
	if (sd_ctl_dir[0].child)
		sd_free_ctl_entry(&sd_ctl_dir[0].child);
4972
}
4973
#else
4974 4975 4976 4977
static void register_sched_domain_sysctl(void)
{
}
static void unregister_sched_domain_sysctl(void)
4978 4979 4980 4981
{
}
#endif

4982 4983 4984 4985 4986
static void set_rq_online(struct rq *rq)
{
	if (!rq->online) {
		const struct sched_class *class;

4987
		cpumask_set_cpu(rq->cpu, rq->rd->online);
4988 4989 4990 4991 4992 4993 4994 4995 4996 4997 4998 4999 5000 5001 5002 5003 5004 5005 5006
		rq->online = 1;

		for_each_class(class) {
			if (class->rq_online)
				class->rq_online(rq);
		}
	}
}

static void set_rq_offline(struct rq *rq)
{
	if (rq->online) {
		const struct sched_class *class;

		for_each_class(class) {
			if (class->rq_offline)
				class->rq_offline(rq);
		}

5007
		cpumask_clear_cpu(rq->cpu, rq->rd->online);
5008 5009 5010 5011
		rq->online = 0;
	}
}

L
Linus Torvalds 已提交
5012 5013 5014 5015
/*
 * migration_call - callback that gets triggered when a CPU is added.
 * Here we can start up the necessary migration thread for the new CPU.
 */
5016
static int
5017
migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
L
Linus Torvalds 已提交
5018
{
5019
	int cpu = (long)hcpu;
L
Linus Torvalds 已提交
5020
	unsigned long flags;
5021
	struct rq *rq = cpu_rq(cpu);
L
Linus Torvalds 已提交
5022

5023
	switch (action & ~CPU_TASKS_FROZEN) {
5024

L
Linus Torvalds 已提交
5025
	case CPU_UP_PREPARE:
5026
		rq->calc_load_update = calc_load_update;
L
Linus Torvalds 已提交
5027
		break;
5028

L
Linus Torvalds 已提交
5029
	case CPU_ONLINE:
5030
		/* Update our root-domain */
5031
		raw_spin_lock_irqsave(&rq->lock, flags);
5032
		if (rq->rd) {
5033
			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
5034 5035

			set_rq_online(rq);
5036
		}
5037
		raw_spin_unlock_irqrestore(&rq->lock, flags);
L
Linus Torvalds 已提交
5038
		break;
5039

L
Linus Torvalds 已提交
5040
#ifdef CONFIG_HOTPLUG_CPU
5041
	case CPU_DYING:
5042
		sched_ttwu_pending();
G
Gregory Haskins 已提交
5043
		/* Update our root-domain */
5044
		raw_spin_lock_irqsave(&rq->lock, flags);
G
Gregory Haskins 已提交
5045
		if (rq->rd) {
5046
			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
5047
			set_rq_offline(rq);
G
Gregory Haskins 已提交
5048
		}
5049 5050
		migrate_tasks(cpu);
		BUG_ON(rq->nr_running != 1); /* the migration thread */
5051
		raw_spin_unlock_irqrestore(&rq->lock, flags);
5052
		break;
5053

5054
	case CPU_DEAD:
5055
		calc_load_migrate(rq);
G
Gregory Haskins 已提交
5056
		break;
L
Linus Torvalds 已提交
5057 5058
#endif
	}
5059 5060 5061

	update_max_interval();

L
Linus Torvalds 已提交
5062 5063 5064
	return NOTIFY_OK;
}

5065 5066 5067
/*
 * Register at high priority so that task migration (migrate_all_tasks)
 * happens before everything else.  This has to be lower priority than
5068
 * the notifier in the perf_event subsystem, though.
L
Linus Torvalds 已提交
5069
 */
5070
static struct notifier_block migration_notifier = {
L
Linus Torvalds 已提交
5071
	.notifier_call = migration_call,
5072
	.priority = CPU_PRI_MIGRATION,
L
Linus Torvalds 已提交
5073 5074
};

5075
static int sched_cpu_active(struct notifier_block *nfb,
5076 5077 5078 5079 5080 5081 5082 5083 5084 5085 5086
				      unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_FAILED:
		set_cpu_active((long)hcpu, true);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

5087
static int sched_cpu_inactive(struct notifier_block *nfb,
5088 5089
					unsigned long action, void *hcpu)
{
5090 5091 5092
	unsigned long flags;
	long cpu = (long)hcpu;

5093 5094
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
5095 5096 5097 5098 5099 5100 5101 5102 5103 5104 5105 5106 5107 5108 5109 5110
		set_cpu_active(cpu, false);

		/* explicitly allow suspend */
		if (!(action & CPU_TASKS_FROZEN)) {
			struct dl_bw *dl_b = dl_bw_of(cpu);
			bool overflow;
			int cpus;

			raw_spin_lock_irqsave(&dl_b->lock, flags);
			cpus = dl_bw_cpus(cpu);
			overflow = __dl_overflow(dl_b, cpus, 0, 0);
			raw_spin_unlock_irqrestore(&dl_b->lock, flags);

			if (overflow)
				return notifier_from_errno(-EBUSY);
		}
5111 5112
		return NOTIFY_OK;
	}
5113 5114

	return NOTIFY_DONE;
5115 5116
}

5117
static int __init migration_init(void)
L
Linus Torvalds 已提交
5118 5119
{
	void *cpu = (void *)(long)smp_processor_id();
5120
	int err;
5121

5122
	/* Initialize migration for the boot CPU */
5123 5124
	err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
	BUG_ON(err == NOTIFY_BAD);
L
Linus Torvalds 已提交
5125 5126
	migration_call(&migration_notifier, CPU_ONLINE, cpu);
	register_cpu_notifier(&migration_notifier);
5127

5128 5129 5130 5131
	/* Register cpu active notifiers */
	cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
	cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);

5132
	return 0;
L
Linus Torvalds 已提交
5133
}
5134
early_initcall(migration_init);
L
Linus Torvalds 已提交
5135 5136 5137
#endif

#ifdef CONFIG_SMP
5138

5139 5140
static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */

5141
#ifdef CONFIG_SCHED_DEBUG
I
Ingo Molnar 已提交
5142

5143
static __read_mostly int sched_debug_enabled;
5144

5145
static int __init sched_debug_setup(char *str)
5146
{
5147
	sched_debug_enabled = 1;
5148 5149 5150

	return 0;
}
5151 5152 5153 5154 5155 5156
early_param("sched_debug", sched_debug_setup);

static inline bool sched_debug(void)
{
	return sched_debug_enabled;
}
5157

5158
static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
5159
				  struct cpumask *groupmask)
L
Linus Torvalds 已提交
5160
{
I
Ingo Molnar 已提交
5161
	struct sched_group *group = sd->groups;
5162
	char str[256];
L
Linus Torvalds 已提交
5163

R
Rusty Russell 已提交
5164
	cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
5165
	cpumask_clear(groupmask);
I
Ingo Molnar 已提交
5166 5167 5168 5169

	printk(KERN_DEBUG "%*s domain %d: ", level, "", level);

	if (!(sd->flags & SD_LOAD_BALANCE)) {
P
Peter Zijlstra 已提交
5170
		printk("does not load-balance\n");
I
Ingo Molnar 已提交
5171
		if (sd->parent)
P
Peter Zijlstra 已提交
5172 5173
			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
					" has parent");
I
Ingo Molnar 已提交
5174
		return -1;
N
Nick Piggin 已提交
5175 5176
	}

P
Peter Zijlstra 已提交
5177
	printk(KERN_CONT "span %s level %s\n", str, sd->name);
I
Ingo Molnar 已提交
5178

5179
	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
P
Peter Zijlstra 已提交
5180 5181
		printk(KERN_ERR "ERROR: domain->span does not contain "
				"CPU%d\n", cpu);
I
Ingo Molnar 已提交
5182
	}
5183
	if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
P
Peter Zijlstra 已提交
5184 5185
		printk(KERN_ERR "ERROR: domain->groups does not contain"
				" CPU%d\n", cpu);
I
Ingo Molnar 已提交
5186
	}
L
Linus Torvalds 已提交
5187

I
Ingo Molnar 已提交
5188
	printk(KERN_DEBUG "%*s groups:", level + 1, "");
L
Linus Torvalds 已提交
5189
	do {
I
Ingo Molnar 已提交
5190
		if (!group) {
P
Peter Zijlstra 已提交
5191 5192
			printk("\n");
			printk(KERN_ERR "ERROR: group is NULL\n");
L
Linus Torvalds 已提交
5193 5194 5195
			break;
		}

5196 5197 5198 5199 5200 5201
		/*
		 * Even though we initialize ->power to something semi-sane,
		 * we leave power_orig unset. This allows us to detect if
		 * domain iteration is still funny without causing /0 traps.
		 */
		if (!group->sgp->power_orig) {
P
Peter Zijlstra 已提交
5202 5203 5204
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: domain->cpu_power not "
					"set\n");
I
Ingo Molnar 已提交
5205 5206
			break;
		}
L
Linus Torvalds 已提交
5207

5208
		if (!cpumask_weight(sched_group_cpus(group))) {
P
Peter Zijlstra 已提交
5209 5210
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: empty group\n");
I
Ingo Molnar 已提交
5211 5212
			break;
		}
L
Linus Torvalds 已提交
5213

5214 5215
		if (!(sd->flags & SD_OVERLAP) &&
		    cpumask_intersects(groupmask, sched_group_cpus(group))) {
P
Peter Zijlstra 已提交
5216 5217
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: repeated CPUs\n");
I
Ingo Molnar 已提交
5218 5219
			break;
		}
L
Linus Torvalds 已提交
5220

5221
		cpumask_or(groupmask, groupmask, sched_group_cpus(group));
L
Linus Torvalds 已提交
5222

R
Rusty Russell 已提交
5223
		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
5224

P
Peter Zijlstra 已提交
5225
		printk(KERN_CONT " %s", str);
5226
		if (group->sgp->power != SCHED_POWER_SCALE) {
P
Peter Zijlstra 已提交
5227
			printk(KERN_CONT " (cpu_power = %d)",
5228
				group->sgp->power);
5229
		}
L
Linus Torvalds 已提交
5230

I
Ingo Molnar 已提交
5231 5232
		group = group->next;
	} while (group != sd->groups);
P
Peter Zijlstra 已提交
5233
	printk(KERN_CONT "\n");
L
Linus Torvalds 已提交
5234

5235
	if (!cpumask_equal(sched_domain_span(sd), groupmask))
P
Peter Zijlstra 已提交
5236
		printk(KERN_ERR "ERROR: groups don't span domain->span\n");
L
Linus Torvalds 已提交
5237

5238 5239
	if (sd->parent &&
	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
P
Peter Zijlstra 已提交
5240 5241
		printk(KERN_ERR "ERROR: parent span is not a superset "
			"of domain->span\n");
I
Ingo Molnar 已提交
5242 5243
	return 0;
}
L
Linus Torvalds 已提交
5244

I
Ingo Molnar 已提交
5245 5246 5247
static void sched_domain_debug(struct sched_domain *sd, int cpu)
{
	int level = 0;
L
Linus Torvalds 已提交
5248

5249
	if (!sched_debug_enabled)
5250 5251
		return;

I
Ingo Molnar 已提交
5252 5253 5254 5255
	if (!sd) {
		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
		return;
	}
L
Linus Torvalds 已提交
5256

I
Ingo Molnar 已提交
5257 5258 5259
	printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);

	for (;;) {
5260
		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
I
Ingo Molnar 已提交
5261
			break;
L
Linus Torvalds 已提交
5262 5263
		level++;
		sd = sd->parent;
5264
		if (!sd)
I
Ingo Molnar 已提交
5265 5266
			break;
	}
L
Linus Torvalds 已提交
5267
}
5268
#else /* !CONFIG_SCHED_DEBUG */
5269
# define sched_domain_debug(sd, cpu) do { } while (0)
5270 5271 5272 5273
static inline bool sched_debug(void)
{
	return false;
}
5274
#endif /* CONFIG_SCHED_DEBUG */
L
Linus Torvalds 已提交
5275

5276
static int sd_degenerate(struct sched_domain *sd)
5277
{
5278
	if (cpumask_weight(sched_domain_span(sd)) == 1)
5279 5280 5281 5282 5283 5284
		return 1;

	/* Following flags need at least 2 groups */
	if (sd->flags & (SD_LOAD_BALANCE |
			 SD_BALANCE_NEWIDLE |
			 SD_BALANCE_FORK |
5285 5286 5287
			 SD_BALANCE_EXEC |
			 SD_SHARE_CPUPOWER |
			 SD_SHARE_PKG_RESOURCES)) {
5288 5289 5290 5291 5292
		if (sd->groups != sd->groups->next)
			return 0;
	}

	/* Following flags don't use groups */
5293
	if (sd->flags & (SD_WAKE_AFFINE))
5294 5295 5296 5297 5298
		return 0;

	return 1;
}

5299 5300
static int
sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
5301 5302 5303 5304 5305 5306
{
	unsigned long cflags = sd->flags, pflags = parent->flags;

	if (sd_degenerate(parent))
		return 1;

5307
	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
5308 5309 5310 5311 5312 5313 5314
		return 0;

	/* Flags needing groups don't count if only 1 group in parent */
	if (parent->groups == parent->groups->next) {
		pflags &= ~(SD_LOAD_BALANCE |
				SD_BALANCE_NEWIDLE |
				SD_BALANCE_FORK |
5315 5316
				SD_BALANCE_EXEC |
				SD_SHARE_CPUPOWER |
5317 5318
				SD_SHARE_PKG_RESOURCES |
				SD_PREFER_SIBLING);
5319 5320
		if (nr_node_ids == 1)
			pflags &= ~SD_SERIALIZE;
5321 5322 5323 5324 5325 5326 5327
	}
	if (~cflags & pflags)
		return 0;

	return 1;
}

5328
static void free_rootdomain(struct rcu_head *rcu)
5329
{
5330
	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
5331

5332
	cpupri_cleanup(&rd->cpupri);
5333
	cpudl_cleanup(&rd->cpudl);
5334
	free_cpumask_var(rd->dlo_mask);
5335 5336 5337 5338 5339 5340
	free_cpumask_var(rd->rto_mask);
	free_cpumask_var(rd->online);
	free_cpumask_var(rd->span);
	kfree(rd);
}

static void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	struct root_domain *old_rd = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	if (rq->rd) {
		old_rd = rq->rd;

		if (cpumask_test_cpu(rq->cpu, old_rd->online))
			set_rq_offline(rq);

		cpumask_clear_cpu(rq->cpu, old_rd->span);

		/*
		 * If we don't want to free the old_rd yet then
		 * set old_rd to NULL to skip the freeing later
		 * in this function:
		 */
		if (!atomic_dec_and_test(&old_rd->refcount))
			old_rd = NULL;
	}

	atomic_inc(&rd->refcount);
	rq->rd = rd;

	cpumask_set_cpu(rq->cpu, rd->span);
	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
		set_rq_online(rq);

	raw_spin_unlock_irqrestore(&rq->lock, flags);

	if (old_rd)
		call_rcu_sched(&old_rd->rcu, free_rootdomain);
}

static int init_rootdomain(struct root_domain *rd)
{
	memset(rd, 0, sizeof(*rd));

	if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
		goto out;
	if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
		goto free_span;
	if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
		goto free_online;
	if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
		goto free_dlo_mask;

	init_dl_bw(&rd->dl_bw);
	if (cpudl_init(&rd->cpudl) != 0)
		goto free_dlo_mask;

	if (cpupri_init(&rd->cpupri) != 0)
		goto free_rto_mask;
	return 0;

free_rto_mask:
	free_cpumask_var(rd->rto_mask);
free_dlo_mask:
	free_cpumask_var(rd->dlo_mask);
free_online:
	free_cpumask_var(rd->online);
free_span:
	free_cpumask_var(rd->span);
out:
	return -ENOMEM;
}

/*
 * By default the system creates a single root-domain with all cpus as
 * members (mimicking the global state we have today).
 */
struct root_domain def_root_domain;

static void init_defrootdomain(void)
{
	init_rootdomain(&def_root_domain);

	atomic_set(&def_root_domain.refcount, 1);
}

static struct root_domain *alloc_rootdomain(void)
{
	struct root_domain *rd;

	rd = kmalloc(sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return NULL;

	if (init_rootdomain(rd) != 0) {
		kfree(rd);
		return NULL;
	}

	return rd;
}

static void free_sched_groups(struct sched_group *sg, int free_sgp)
{
	struct sched_group *tmp, *first;

	if (!sg)
		return;

	first = sg;
	do {
		tmp = sg->next;

		if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
			kfree(sg->sgp);

		kfree(sg);
		sg = tmp;
	} while (sg != first);
}

static void free_sched_domain(struct rcu_head *rcu)
{
	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);

	/*
	 * If it's an overlapping domain it has private groups, iterate and
	 * nuke them all.
	 */
	if (sd->flags & SD_OVERLAP) {
		free_sched_groups(sd->groups, 1);
	} else if (atomic_dec_and_test(&sd->groups->ref)) {
		kfree(sd->groups->sgp);
		kfree(sd->groups);
	}
	kfree(sd);
}

static void destroy_sched_domain(struct sched_domain *sd, int cpu)
{
	call_rcu(&sd->rcu, free_sched_domain);
}

static void destroy_sched_domains(struct sched_domain *sd, int cpu)
{
	for (; sd; sd = sd->parent)
		destroy_sched_domain(sd, cpu);
}

/*
 * Keep a special pointer to the highest sched_domain that has
 * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this cpu; it
 * allows us to avoid some pointer chasing in select_idle_sibling().
 *
 * Also keep a unique ID per domain (we use the first cpu number in
 * the cpumask of the domain), this allows us to quickly tell if
 * two cpus are in the same cache domain, see cpus_share_cache().
 */
DEFINE_PER_CPU(struct sched_domain *, sd_llc);
DEFINE_PER_CPU(int, sd_llc_size);
DEFINE_PER_CPU(int, sd_llc_id);
DEFINE_PER_CPU(struct sched_domain *, sd_numa);
DEFINE_PER_CPU(struct sched_domain *, sd_busy);
DEFINE_PER_CPU(struct sched_domain *, sd_asym);

static void update_top_cache_domain(int cpu)
{
	struct sched_domain *sd;
	struct sched_domain *busy_sd = NULL;
	int id = cpu;
	int size = 1;

	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
	if (sd) {
		id = cpumask_first(sched_domain_span(sd));
		size = cpumask_weight(sched_domain_span(sd));
		busy_sd = sd->parent; /* sd_busy */
	}
	rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);

	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
	per_cpu(sd_llc_size, cpu) = size;
	per_cpu(sd_llc_id, cpu) = id;

	sd = lowest_flag_domain(cpu, SD_NUMA);
	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);

	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
	rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
}
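
/*
 * For example, "do CPUs a and b share a last-level cache?" then reduces to
 * comparing per_cpu(sd_llc_id, a) with per_cpu(sd_llc_id, b) (see the
 * cpus_share_cache() reference above) instead of walking the domain tree on
 * every wakeup.
 */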

/*
 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
 * hold the hotplug lock.
 */
static void
cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct sched_domain *tmp;

	/* Remove the sched domains which do not contribute to scheduling. */
	for (tmp = sd; tmp; ) {
		struct sched_domain *parent = tmp->parent;
		if (!parent)
			break;

		if (sd_parent_degenerate(tmp, parent)) {
			tmp->parent = parent->parent;
			if (parent->parent)
				parent->parent->child = tmp;
			/*
			 * Transfer SD_PREFER_SIBLING down in case of a
			 * degenerate parent; the spans match for this
			 * so the property transfers.
			 */
			if (parent->flags & SD_PREFER_SIBLING)
				tmp->flags |= SD_PREFER_SIBLING;
			destroy_sched_domain(parent, cpu);
		} else
			tmp = tmp->parent;
	}

	if (sd && sd_degenerate(sd)) {
		tmp = sd;
		sd = sd->parent;
		destroy_sched_domain(tmp, cpu);
		if (sd)
			sd->child = NULL;
	}

	sched_domain_debug(sd, cpu);

	rq_attach_root(rq, rd);
	tmp = rq->sd;
	rcu_assign_pointer(rq->sd, sd);
	destroy_sched_domains(tmp, cpu);

	update_top_cache_domain(cpu);
}

/* cpus with isolated domains */
static cpumask_var_t cpu_isolated_map;

/* Setup the mask of cpus configured for isolated domains */
static int __init isolated_cpu_setup(char *str)
{
	alloc_bootmem_cpumask_var(&cpu_isolated_map);
	cpulist_parse(str, cpu_isolated_map);
	return 1;
}

__setup("isolcpus=", isolated_cpu_setup);
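
/*
 * Example (illustrative): booting with "isolcpus=2,3" parses CPUs 2 and 3
 * into cpu_isolated_map; init_sched_domains() below then builds the default
 * domain over cpu_active_mask minus those CPUs, so they take part in no load
 * balancing and only run tasks explicitly affined to them.
 */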

static const struct cpumask *cpu_cpu_mask(int cpu)
{
	return cpumask_of_node(cpu_to_node(cpu));
}

struct sd_data {
	struct sched_domain **__percpu sd;
	struct sched_group **__percpu sg;
	struct sched_group_power **__percpu sgp;
};

struct s_data {
	struct sched_domain ** __percpu sd;
	struct root_domain	*rd;
};

enum s_alloc {
	sa_rootdomain,
	sa_sd,
	sa_sd_storage,
	sa_none,
};

struct sched_domain_topology_level;

typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);

#define SDTL_OVERLAP	0x01

struct sched_domain_topology_level {
	sched_domain_init_f init;
	sched_domain_mask_f mask;
	int		    flags;
	int		    numa_level;
	struct sd_data      data;
};

/*
 * Build an iteration mask that can exclude certain CPUs from the upwards
 * domain traversal.
 *
 * Asymmetric node setups can result in situations where the domain tree is of
 * unequal depth, make sure to skip domains that already cover the entire
 * range.
 *
 * In that case build_sched_domains() will have terminated the iteration early
 * and our sibling sd spans will be empty. Domains should always include the
 * cpu they're built on, so check that.
 *
 */
static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
{
	const struct cpumask *span = sched_domain_span(sd);
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	for_each_cpu(i, span) {
		sibling = *per_cpu_ptr(sdd->sd, i);
		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
			continue;

		cpumask_set_cpu(i, sched_group_mask(sg));
	}
}

/*
 * Return the canonical balance cpu for this group, this is the first cpu
 * of this group that's also in the iteration mask.
 */
int group_balance_cpu(struct sched_group *sg)
{
	return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
}

static int
build_overlap_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered = sched_domains_tmpmask;
	struct sd_data *sdd = sd->private;
	struct sched_domain *child;
	int i;

	cpumask_clear(covered);

	for_each_cpu(i, span) {
		struct cpumask *sg_span;

		if (cpumask_test_cpu(i, covered))
			continue;

		child = *per_cpu_ptr(sdd->sd, i);

		/* See the comment near build_group_mask(). */
		if (!cpumask_test_cpu(i, sched_domain_span(child)))
			continue;

		sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
				GFP_KERNEL, cpu_to_node(cpu));

		if (!sg)
			goto fail;

		sg_span = sched_group_cpus(sg);
		if (child->child) {
			child = child->child;
			cpumask_copy(sg_span, sched_domain_span(child));
		} else
			cpumask_set_cpu(i, sg_span);

		cpumask_or(covered, covered, sg_span);

		sg->sgp = *per_cpu_ptr(sdd->sgp, i);
		if (atomic_inc_return(&sg->sgp->ref) == 1)
			build_group_mask(sd, sg);

		/*
		 * Initialize sgp->power such that even if we mess up the
		 * domains and no possible iteration will get us here, we won't
		 * die on a /0 trap.
		 */
		sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
		sg->sgp->power_orig = sg->sgp->power;

		/*
		 * Make sure the first group of this domain contains the
		 * canonical balance cpu. Otherwise the sched_domain iteration
		 * breaks. See update_sg_lb_stats().
		 */
		if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
		    group_balance_cpu(sg) == cpu)
			groups = sg;

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
		last->next = first;
	}
	sd->groups = groups;

	return 0;

fail:
	free_sched_groups(first, 0);

	return -ENOMEM;
}

static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
{
	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
	struct sched_domain *child = sd->child;

	if (child)
		cpu = cpumask_first(sched_domain_span(child));

	if (sg) {
		*sg = *per_cpu_ptr(sdd->sg, cpu);
		(*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
		atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
	}

	return cpu;
}

/*
 * build_sched_groups will build a circular linked list of the groups
 * covered by the given span, and will set each group's ->cpumask correctly,
 * and ->cpu_power to 0.
 *
 * Assumes the sched_domain tree is fully constructed.
 */
static int
build_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL;
	struct sd_data *sdd = sd->private;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered;
	int i;

	get_group(cpu, sdd, &sd->groups);
	atomic_inc(&sd->groups->ref);

	if (cpu != cpumask_first(span))
		return 0;

	lockdep_assert_held(&sched_domains_mutex);
	covered = sched_domains_tmpmask;

	cpumask_clear(covered);

	for_each_cpu(i, span) {
		struct sched_group *sg;
		int group, j;

		if (cpumask_test_cpu(i, covered))
			continue;

		group = get_group(i, sdd, &sg);
		cpumask_clear(sched_group_cpus(sg));
		sg->sgp->power = 0;
		cpumask_setall(sched_group_mask(sg));

		for_each_cpu(j, span) {
			if (get_group(j, sdd, NULL) != group)
				continue;

			cpumask_set_cpu(j, covered);
			cpumask_set_cpu(j, sched_group_cpus(sg));
		}

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
	}
	last->next = first;

	return 0;
}

/*
 * Initialize sched groups cpu_power.
 *
 * cpu_power indicates the capacity of a sched group, which is used while
 * distributing the load between different sched groups in a sched domain.
 * Typically cpu_power for all the groups in a sched domain will be the same
 * unless there are asymmetries in the topology. If there are asymmetries, a
 * group having more cpu_power will pick up more load compared to the group
 * having less cpu_power.
 */
static void init_sched_groups_power(int cpu, struct sched_domain *sd)
{
	struct sched_group *sg = sd->groups;

	WARN_ON(!sg);

	do {
		sg->group_weight = cpumask_weight(sched_group_cpus(sg));
		sg = sg->next;
	} while (sg != sd->groups);

	if (cpu != group_balance_cpu(sg))
		return;

	update_group_power(sd, cpu);
	atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight);
}

int __weak arch_sd_sibling_asym_packing(void)
{
	return 0*SD_ASYM_PACKING;
}

/*
 * Initializers for schedule domains
 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
 */

#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(sd, type)		sd->name = #type
#else
# define SD_INIT_NAME(sd, type)		do { } while (0)
#endif

#define SD_INIT_FUNC(type)						\
static noinline struct sched_domain *					\
sd_init_##type(struct sched_domain_topology_level *tl, int cpu)	\
{									\
	struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);	\
	*sd = SD_##type##_INIT;						\
	SD_INIT_NAME(sd, type);						\
	sd->private = &tl->data;					\
	return sd;							\
}

SD_INIT_FUNC(CPU)
#ifdef CONFIG_SCHED_SMT
 SD_INIT_FUNC(SIBLING)
#endif
#ifdef CONFIG_SCHED_MC
 SD_INIT_FUNC(MC)
#endif
#ifdef CONFIG_SCHED_BOOK
 SD_INIT_FUNC(BOOK)
#endif

static int default_relax_domain_level = -1;
int sched_domain_level_max;

static int __init setup_relax_domain_level(char *str)
{
	if (kstrtoint(str, 0, &default_relax_domain_level))
		pr_warn("Unable to set relax_domain_level\n");

	return 1;
}
__setup("relax_domain_level=", setup_relax_domain_level);

static void set_domain_attribute(struct sched_domain *sd,
				 struct sched_domain_attr *attr)
{
	int request;

	if (!attr || attr->relax_domain_level < 0) {
		if (default_relax_domain_level < 0)
			return;
		else
			request = default_relax_domain_level;
	} else
		request = attr->relax_domain_level;
	if (request < sd->level) {
		/* turn off idle balance on this domain */
		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
	} else {
		/* turn on idle balance on this domain */
		sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
	}
}
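
/*
 * Example (illustrative): booting with "relax_domain_level=1" keeps
 * SD_BALANCE_WAKE/SD_BALANCE_NEWIDLE enabled on domains at levels 0 and 1,
 * and clears both flags on every higher-level (wider) domain.
 */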

static void __sdt_free(const struct cpumask *cpu_map);
static int __sdt_alloc(const struct cpumask *cpu_map);

static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
				 const struct cpumask *cpu_map)
{
	switch (what) {
	case sa_rootdomain:
		if (!atomic_read(&d->rd->refcount))
			free_rootdomain(&d->rd->rcu); /* fall through */
	case sa_sd:
		free_percpu(d->sd); /* fall through */
	case sa_sd_storage:
		__sdt_free(cpu_map); /* fall through */
	case sa_none:
		break;
	}
}

static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
						   const struct cpumask *cpu_map)
{
	memset(d, 0, sizeof(*d));

	if (__sdt_alloc(cpu_map))
		return sa_sd_storage;
	d->sd = alloc_percpu(struct sched_domain *);
	if (!d->sd)
		return sa_sd_storage;
	d->rd = alloc_rootdomain();
	if (!d->rd)
		return sa_sd;
	return sa_rootdomain;
}

/*
 * NULL the sd_data elements we've used to build the sched_domain and
 * sched_group structure so that the subsequent __free_domain_allocs()
 * will not free the data we're using.
 */
static void claim_allocations(int cpu, struct sched_domain *sd)
{
	struct sd_data *sdd = sd->private;

	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
	*per_cpu_ptr(sdd->sd, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
		*per_cpu_ptr(sdd->sg, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
		*per_cpu_ptr(sdd->sgp, cpu) = NULL;
}

#ifdef CONFIG_SCHED_SMT
static const struct cpumask *cpu_smt_mask(int cpu)
{
	return topology_thread_cpumask(cpu);
}
#endif

/*
 * Topology list, bottom-up.
 */
static struct sched_domain_topology_level default_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ sd_init_SIBLING, cpu_smt_mask, },
#endif
#ifdef CONFIG_SCHED_MC
	{ sd_init_MC, cpu_coregroup_mask, },
#endif
#ifdef CONFIG_SCHED_BOOK
	{ sd_init_BOOK, cpu_book_mask, },
#endif
	{ sd_init_CPU, cpu_cpu_mask, },
	{ NULL, },
};

static struct sched_domain_topology_level *sched_domain_topology = default_topology;

#define for_each_sd_topology(tl)			\
	for (tl = sched_domain_topology; tl->init; tl++)

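/*
 * Illustrative example: on a hypothetical SMT + multi-core machine with
 * CONFIG_SCHED_SMT and CONFIG_SCHED_MC enabled, build_sched_domains() walks
 * this table bottom-up, giving each CPU a SIBLING domain nested inside an MC
 * domain nested inside a CPU (package/node) domain; sched_init_numa() may
 * append further NUMA levels, and degenerate levels are collapsed again by
 * cpu_attach_domain().
 */
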
#ifdef CONFIG_NUMA

static int sched_domains_numa_levels;
static int *sched_domains_numa_distance;
static struct cpumask ***sched_domains_numa_masks;
static int sched_domains_curr_level;

static inline int sd_local_flags(int level)
{
6011
	if (sched_domains_numa_distance[level] > RECLAIM_DISTANCE)
6012 6013 6014 6015 6016 6017 6018 6019 6020 6021 6022 6023 6024 6025 6026 6027 6028
		return 0;

	return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE;
}

static struct sched_domain *
sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
{
	struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
	int level = tl->numa_level;
	int sd_weight = cpumask_weight(
			sched_domains_numa_masks[level][cpu_to_node(cpu)]);

	*sd = (struct sched_domain){
		.min_interval		= sd_weight,
		.max_interval		= 2*sd_weight,
		.busy_factor		= 32,
6029
		.imbalance_pct		= 125,
6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 6043 6044 6045 6046
		.cache_nice_tries	= 2,
		.busy_idx		= 3,
		.idle_idx		= 2,
		.newidle_idx		= 0,
		.wake_idx		= 0,
		.forkexec_idx		= 0,

		.flags			= 1*SD_LOAD_BALANCE
					| 1*SD_BALANCE_NEWIDLE
					| 0*SD_BALANCE_EXEC
					| 0*SD_BALANCE_FORK
					| 0*SD_BALANCE_WAKE
					| 0*SD_WAKE_AFFINE
					| 0*SD_SHARE_CPUPOWER
					| 0*SD_SHARE_PKG_RESOURCES
					| 1*SD_SERIALIZE
					| 0*SD_PREFER_SIBLING
6047
					| 1*SD_NUMA
6048 6049 6050 6051
					| sd_local_flags(level)
					,
		.last_balance		= jiffies,
		.balance_interval	= sd_weight,
6052 6053
		.max_newidle_lb_cost	= 0,
		.next_decay_max_lb_cost	= jiffies,
6054 6055 6056 6057 6058 6059 6060 6061 6062 6063 6064 6065 6066 6067 6068 6069 6070
	};
	SD_INIT_NAME(sd, NUMA);
	sd->private = &tl->data;

	/*
	 * Ugly hack to pass state to sd_numa_mask()...
	 */
	sched_domains_curr_level = tl->numa_level;

	return sd;
}

static const struct cpumask *sd_numa_mask(int cpu)
{
	return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
}

static void sched_numa_warn(const char *str)
{
	static int done = false;
	int i,j;

	if (done)
		return;

	done = true;

	printk(KERN_WARNING "ERROR: %s\n\n", str);

	for (i = 0; i < nr_node_ids; i++) {
		printk(KERN_WARNING "  ");
		for (j = 0; j < nr_node_ids; j++)
			printk(KERN_CONT "%02d ", node_distance(i,j));
		printk(KERN_CONT "\n");
	}
	printk(KERN_WARNING "\n");
}

static bool find_numa_distance(int distance)
{
	int i;

	if (distance == node_distance(0, 0))
		return true;

	for (i = 0; i < sched_domains_numa_levels; i++) {
		if (sched_domains_numa_distance[i] == distance)
			return true;
	}

	return false;
}

6107 6108 6109 6110 6111 6112 6113 6114 6115 6116 6117 6118 6119 6120 6121 6122 6123 6124 6125 6126 6127
static void sched_init_numa(void)
{
	int next_distance, curr_distance = node_distance(0, 0);
	struct sched_domain_topology_level *tl;
	int level = 0;
	int i, j, k;

	sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
	if (!sched_domains_numa_distance)
		return;

	/*
	 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
	 * unique distances in the node_distance() table.
	 *
	 * Assumes node_distance(0,j) includes all distances in
	 * node_distance(i,j) in order to avoid cubic time.
	 */
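	/*
	 * Illustrative example: for a hypothetical 4-node machine whose
	 * node_distance() table contains only the values 10 (local), 20 and
	 * 40, this scan records sched_domains_numa_distance[] = {20, 40} and
	 * finishes with level == 2, i.e. two NUMA levels on top of the
	 * identity distance.
	 */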
	next_distance = curr_distance;
	for (i = 0; i < nr_node_ids; i++) {
		for (j = 0; j < nr_node_ids; j++) {
6128 6129 6130 6131 6132 6133 6134 6135 6136 6137 6138 6139 6140 6141 6142 6143 6144 6145 6146 6147 6148 6149 6150 6151
			for (k = 0; k < nr_node_ids; k++) {
				int distance = node_distance(i, k);

				if (distance > curr_distance &&
				    (distance < next_distance ||
				     next_distance == curr_distance))
					next_distance = distance;

				/*
				 * While not a strong assumption it would be nice to know
				 * about cases where if node A is connected to B, B is not
				 * equally connected to A.
				 */
				if (sched_debug() && node_distance(k, i) != distance)
					sched_numa_warn("Node-distance not symmetric");

				if (sched_debug() && i && !find_numa_distance(distance))
					sched_numa_warn("Node-0 not representative");
			}
			if (next_distance != curr_distance) {
				sched_domains_numa_distance[level++] = next_distance;
				sched_domains_numa_levels = level;
				curr_distance = next_distance;
			} else break;
6152
		}
6153 6154 6155 6156 6157 6158

		/*
		 * In case of sched_debug() we verify the above assumption.
		 */
		if (!sched_debug())
			break;
	}
	/*
	 * 'level' contains the number of unique distances, excluding the
	 * identity distance node_distance(i,i).
	 *
	 * The sched_domains_numa_distance[] array includes the actual distance
	 * numbers.
	 */

	/*
	 * Here, we should temporarily reset sched_domains_numa_levels to 0.
	 * If it fails to allocate memory for array sched_domains_numa_masks[][],
	 * the array will contain fewer than 'level' members. This could be
	 * dangerous when we use it to iterate array sched_domains_numa_masks[][]
	 * in other functions.
	 *
	 * We reset it to 'level' at the end of this function.
	 */
	sched_domains_numa_levels = 0;

6179 6180 6181 6182 6183 6184 6185 6186 6187 6188 6189 6190 6191 6192 6193
	sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
	if (!sched_domains_numa_masks)
		return;

	/*
	 * Now for each level, construct a mask per node which contains all
	 * cpus of nodes that are that many hops away from us.
	 */
	for (i = 0; i < level; i++) {
		sched_domains_numa_masks[i] =
			kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
		if (!sched_domains_numa_masks[i])
			return;

		for (j = 0; j < nr_node_ids; j++) {
6194
			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
6195 6196 6197 6198 6199 6200
			if (!mask)
				return;

			sched_domains_numa_masks[i][j] = mask;

			for (k = 0; k < nr_node_ids; k++) {
6201
				if (node_distance(j, k) > sched_domains_numa_distance[i])
6202 6203 6204 6205 6206 6207 6208 6209 6210 6211 6212 6213 6214 6215 6216 6217 6218 6219 6220 6221 6222 6223 6224 6225 6226 6227 6228 6229 6230 6231 6232
					continue;

				cpumask_or(mask, mask, cpumask_of_node(k));
			}
		}
	}

	tl = kzalloc((ARRAY_SIZE(default_topology) + level) *
			sizeof(struct sched_domain_topology_level), GFP_KERNEL);
	if (!tl)
		return;

	/*
	 * Copy the default topology bits..
	 */
	for (i = 0; default_topology[i].init; i++)
		tl[i] = default_topology[i];

	/*
	 * .. and append 'j' levels of NUMA goodness.
	 */
	for (j = 0; j < level; i++, j++) {
		tl[i] = (struct sched_domain_topology_level){
			.init = sd_numa_init,
			.mask = sd_numa_mask,
			.flags = SDTL_OVERLAP,
			.numa_level = j,
		};
	}

	sched_domain_topology = tl;
6233 6234

	sched_domains_numa_levels = level;
6235
}
6236 6237 6238 6239 6240 6241 6242 6243 6244 6245 6246 6247 6248 6249 6250 6251 6252 6253 6254 6255 6256 6257 6258 6259 6260 6261 6262 6263 6264 6265 6266 6267 6268 6269 6270 6271 6272 6273 6274 6275 6276 6277 6278 6279 6280 6281 6282

static void sched_domains_numa_masks_set(int cpu)
{
	int i, j;
	int node = cpu_to_node(cpu);

	for (i = 0; i < sched_domains_numa_levels; i++) {
		for (j = 0; j < nr_node_ids; j++) {
			if (node_distance(j, node) <= sched_domains_numa_distance[i])
				cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
		}
	}
}

static void sched_domains_numa_masks_clear(int cpu)
{
	int i, j;
	for (i = 0; i < sched_domains_numa_levels; i++) {
		for (j = 0; j < nr_node_ids; j++)
			cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
	}
}

/*
 * Update sched_domains_numa_masks[level][node] array when new cpus
 * are onlined.
 */
static int sched_domains_numa_masks_update(struct notifier_block *nfb,
					   unsigned long action,
					   void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		sched_domains_numa_masks_set(cpu);
		break;

	case CPU_DEAD:
		sched_domains_numa_masks_clear(cpu);
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
6283 6284 6285 6286 6287
}
#else
static inline void sched_init_numa(void)
{
}
6288 6289 6290 6291 6292 6293 6294

static int sched_domains_numa_masks_update(struct notifier_block *nfb,
					   unsigned long action,
					   void *hcpu)
{
	return 0;
}
6295 6296
#endif /* CONFIG_NUMA */

6297 6298 6299 6300 6301
static int __sdt_alloc(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	int j;

6302
	for_each_sd_topology(tl) {
6303 6304 6305 6306 6307 6308 6309 6310 6311 6312
		struct sd_data *sdd = &tl->data;

		sdd->sd = alloc_percpu(struct sched_domain *);
		if (!sdd->sd)
			return -ENOMEM;

		sdd->sg = alloc_percpu(struct sched_group *);
		if (!sdd->sg)
			return -ENOMEM;

6313 6314 6315 6316
		sdd->sgp = alloc_percpu(struct sched_group_power *);
		if (!sdd->sgp)
			return -ENOMEM;

6317 6318 6319
		for_each_cpu(j, cpu_map) {
			struct sched_domain *sd;
			struct sched_group *sg;
6320
			struct sched_group_power *sgp;
6321 6322 6323 6324 6325 6326 6327 6328 6329 6330 6331 6332 6333

		       	sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sd)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sd, j) = sd;

			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sg)
				return -ENOMEM;

6334 6335
			sg->next = sg;

6336
			*per_cpu_ptr(sdd->sg, j) = sg;
6337

P
Peter Zijlstra 已提交
6338
			sgp = kzalloc_node(sizeof(struct sched_group_power) + cpumask_size(),
6339 6340 6341 6342 6343
					GFP_KERNEL, cpu_to_node(j));
			if (!sgp)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sgp, j) = sgp;
6344 6345 6346 6347 6348 6349 6350 6351 6352 6353 6354
		}
	}

	return 0;
}

static void __sdt_free(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	int j;

6355
	for_each_sd_topology(tl) {
6356 6357 6358
		struct sd_data *sdd = &tl->data;

		for_each_cpu(j, cpu_map) {
6359 6360 6361 6362 6363 6364 6365 6366 6367 6368 6369 6370 6371
			struct sched_domain *sd;

			if (sdd->sd) {
				sd = *per_cpu_ptr(sdd->sd, j);
				if (sd && (sd->flags & SD_OVERLAP))
					free_sched_groups(sd->groups, 0);
				kfree(*per_cpu_ptr(sdd->sd, j));
			}

			if (sdd->sg)
				kfree(*per_cpu_ptr(sdd->sg, j));
			if (sdd->sgp)
				kfree(*per_cpu_ptr(sdd->sgp, j));
6372 6373
		}
		free_percpu(sdd->sd);
6374
		sdd->sd = NULL;
6375
		free_percpu(sdd->sg);
6376
		sdd->sg = NULL;
6377
		free_percpu(sdd->sgp);
6378
		sdd->sgp = NULL;
6379 6380 6381
	}
}

6382
struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
6383 6384
		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
		struct sched_domain *child, int cpu)
6385
{
6386
	struct sched_domain *sd = tl->init(tl, cpu);
6387
	if (!sd)
6388
		return child;
6389 6390

	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
6391 6392 6393
	if (child) {
		sd->level = child->level + 1;
		sched_domain_level_max = max(sched_domain_level_max, sd->level);
6394
		child->parent = sd;
6395
		sd->child = child;
6396
	}
6397
	set_domain_attribute(sd, attr);
6398 6399 6400 6401

	return sd;
}

6402 6403 6404 6405
/*
 * Build sched domains for a given set of cpus and attach the sched domains
 * to the individual cpus
 */
6406 6407
static int build_sched_domains(const struct cpumask *cpu_map,
			       struct sched_domain_attr *attr)
6408
{
6409
	enum s_alloc alloc_state;
6410
	struct sched_domain *sd;
6411
	struct s_data d;
6412
	int i, ret = -ENOMEM;
6413

6414 6415 6416
	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
	if (alloc_state != sa_rootdomain)
		goto error;
6417

6418
	/* Set up domains for cpus specified by the cpu_map. */
6419
	for_each_cpu(i, cpu_map) {
6420 6421
		struct sched_domain_topology_level *tl;

6422
		sd = NULL;
6423
		for_each_sd_topology(tl) {
6424
			sd = build_sched_domain(tl, cpu_map, attr, sd, i);
6425 6426
			if (tl == sched_domain_topology)
				*per_cpu_ptr(d.sd, i) = sd;
6427 6428
			if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
				sd->flags |= SD_OVERLAP;
6429 6430
			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
				break;
6431
		}
6432 6433 6434 6435 6436 6437
	}

	/* Build the groups for the domains */
	for_each_cpu(i, cpu_map) {
		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
			sd->span_weight = cpumask_weight(sched_domain_span(sd));
6438 6439 6440 6441 6442 6443 6444
			if (sd->flags & SD_OVERLAP) {
				if (build_overlap_sched_groups(sd, i))
					goto error;
			} else {
				if (build_sched_groups(sd, i))
					goto error;
			}
6445
		}
6446
	}
6447

L
Linus Torvalds 已提交
6448
	/* Calculate CPU power for physical packages and nodes */
6449 6450 6451
	for (i = nr_cpumask_bits-1; i >= 0; i--) {
		if (!cpumask_test_cpu(i, cpu_map))
			continue;
6452

6453 6454
		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
			claim_allocations(i, sd);
6455
			init_sched_groups_power(i, sd);
6456
		}
6457
	}
6458

L
Linus Torvalds 已提交
6459
	/* Attach the domains */
6460
	rcu_read_lock();
6461
	for_each_cpu(i, cpu_map) {
6462
		sd = *per_cpu_ptr(d.sd, i);
6463
		cpu_attach_domain(sd, d.rd, i);
L
Linus Torvalds 已提交
6464
	}
6465
	rcu_read_unlock();
6466

6467
	ret = 0;
6468
error:
6469
	__free_domain_allocs(&d, alloc_state, cpu_map);
6470
	return ret;
L
Linus Torvalds 已提交
6471
}

static cpumask_var_t *doms_cur;	/* current sched domains */
static int ndoms_cur;		/* number of sched domains in 'doms_cur' */
static struct sched_domain_attr *dattr_cur;
				/* attributes of custom domains in 'doms_cur' */

/*
 * Special case: If a kmalloc of a doms_cur partition (array of
 * cpumask) fails, then fall back to a single sched domain,
 * as determined by the single cpumask fallback_doms.
 */
static cpumask_var_t fallback_doms;

6485 6486 6487 6488 6489
/*
 * arch_update_cpu_topology lets virtualized architectures update the
 * cpu core maps. It is supposed to return 1 if the topology changed
 * or 0 if it stayed the same.
 */
6490
int __weak arch_update_cpu_topology(void)
6491
{
6492
	return 0;
6493 6494
}

6495 6496 6497 6498 6499 6500 6501 6502 6503 6504 6505 6506 6507 6508 6509 6510 6511 6512 6513 6514 6515 6516 6517 6518 6519
cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
{
	int i;
	cpumask_var_t *doms;

	doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
	if (!doms)
		return NULL;
	for (i = 0; i < ndoms; i++) {
		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
			free_sched_domains(doms, i);
			return NULL;
		}
	}
	return doms;
}

void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
{
	unsigned int i;
	for (i = 0; i < ndoms; i++)
		free_cpumask_var(doms[i]);
	kfree(doms);
}

6520
/*
I
Ingo Molnar 已提交
6521
 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
P
Paul Jackson 已提交
6522 6523
 * For now this just excludes isolated cpus, but could be used to
 * exclude other special cases in the future.
6524
 */
6525
static int init_sched_domains(const struct cpumask *cpu_map)
6526
{
6527 6528
	int err;

6529
	arch_update_cpu_topology();
P
Paul Jackson 已提交
6530
	ndoms_cur = 1;
6531
	doms_cur = alloc_sched_domains(ndoms_cur);
P
Paul Jackson 已提交
6532
	if (!doms_cur)
6533 6534
		doms_cur = &fallback_doms;
	cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
6535
	err = build_sched_domains(doms_cur[0], NULL);
6536
	register_sched_domain_sysctl();
6537 6538

	return err;
6539 6540 6541 6542 6543 6544
}

/*
 * Detach sched domains from a group of cpus specified in cpu_map
 * These cpus will now be attached to the NULL domain
 */
6545
static void detach_destroy_domains(const struct cpumask *cpu_map)
6546 6547 6548
{
	int i;

6549
	rcu_read_lock();
6550
	for_each_cpu(i, cpu_map)
G
Gregory Haskins 已提交
6551
		cpu_attach_domain(NULL, &def_root_domain, i);
6552
	rcu_read_unlock();
6553 6554
}

6555 6556 6557 6558 6559 6560 6561 6562 6563 6564 6565 6566 6567 6568 6569 6570
/* handle null as "default" */
static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
			struct sched_domain_attr *new, int idx_new)
{
	struct sched_domain_attr tmp;

	/* fast path */
	if (!new && !cur)
		return 1;

	tmp = SD_ATTR_INIT;
	return !memcmp(cur ? (cur + idx_cur) : &tmp,
			new ? (new + idx_new) : &tmp,
			sizeof(struct sched_domain_attr));
}

/*
 * Partition sched domains as specified by the 'ndoms_new'
 * cpumasks in the array doms_new[] of cpumasks. This compares
 * doms_new[] to the current sched domain partitioning, doms_cur[].
 * It destroys each deleted domain and builds each new domain.
 *
 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
 * The masks don't intersect (don't overlap.) We should set up one
 * sched domain for each mask. CPUs not in any of the cpumasks will
 * not be load balanced. If the same cpumask appears both in the
 * current 'doms_cur' domains and in the new 'doms_new', we can leave
 * it as it is.
 *
 * The passed in 'doms_new' should be allocated using
 * alloc_sched_domains.  This routine takes ownership of it and will
 * free_sched_domains it when done with it. If the caller failed the
 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
 * and partition_sched_domains() will fall back to the single partition
 * 'fallback_doms'; it also forces the domains to be rebuilt.
 *
 * If doms_new == NULL it will be replaced with cpu_online_mask.
 * ndoms_new == 0 is a special case for destroying existing domains,
 * and it will not create the default domain.
 *
 * Call with hotplug lock held
 */
void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			     struct sched_domain_attr *dattr_new)
{
6600
	int i, j, n;
6601
	int new_topology;
P
Paul Jackson 已提交
6602

6603
	mutex_lock(&sched_domains_mutex);
6604

6605 6606 6607
	/* always unregister in case we don't destroy any domains */
	unregister_sched_domain_sysctl();

6608 6609 6610
	/* Let architecture update cpu core mappings. */
	new_topology = arch_update_cpu_topology();

6611
	n = doms_new ? ndoms_new : 0;
P
Paul Jackson 已提交
6612 6613 6614

	/* Destroy deleted domains */
	for (i = 0; i < ndoms_cur; i++) {
6615
		for (j = 0; j < n && !new_topology; j++) {
6616
			if (cpumask_equal(doms_cur[i], doms_new[j])
6617
			    && dattrs_equal(dattr_cur, i, dattr_new, j))
P
Paul Jackson 已提交
6618 6619 6620
				goto match1;
		}
		/* no match - a current sched domain not in new doms_new[] */
6621
		detach_destroy_domains(doms_cur[i]);
P
Paul Jackson 已提交
6622 6623 6624 6625
match1:
		;
	}

6626
	n = ndoms_cur;
6627
	if (doms_new == NULL) {
6628
		n = 0;
6629
		doms_new = &fallback_doms;
6630
		cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
6631
		WARN_ON_ONCE(dattr_new);
6632 6633
	}

P
Paul Jackson 已提交
6634 6635
	/* Build new domains */
	for (i = 0; i < ndoms_new; i++) {
6636
		for (j = 0; j < n && !new_topology; j++) {
6637
			if (cpumask_equal(doms_new[i], doms_cur[j])
6638
			    && dattrs_equal(dattr_new, i, dattr_cur, j))
P
Paul Jackson 已提交
6639 6640 6641
				goto match2;
		}
		/* no match - add a new doms_new */
6642
		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
P
Paul Jackson 已提交
6643 6644 6645 6646 6647
match2:
		;
	}

	/* Remember the new sched domains */
6648 6649
	if (doms_cur != &fallback_doms)
		free_sched_domains(doms_cur, ndoms_cur);
6650
	kfree(dattr_cur);	/* kfree(NULL) is safe */
P
Paul Jackson 已提交
6651
	doms_cur = doms_new;
6652
	dattr_cur = dattr_new;
P
Paul Jackson 已提交
6653
	ndoms_cur = ndoms_new;
6654 6655

	register_sched_domain_sysctl();
6656

6657
	mutex_unlock(&sched_domains_mutex);
}
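
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * the cpuset code typically hands over a freshly allocated partition and
 * this function takes ownership of it.  "mask_a" and "mask_b" below are
 * placeholder cpumasks.
 *
 *	cpumask_var_t *doms = alloc_sched_domains(2);
 *
 *	if (doms) {
 *		cpumask_copy(doms[0], mask_a);
 *		cpumask_copy(doms[1], mask_b);
 *	}
 *	get_online_cpus();
 *	partition_sched_domains(doms ? 2 : 1, doms, NULL);
 *	put_online_cpus();
 *
 * Passing doms_new == NULL with ndoms_new == 1 falls back to the single
 * 'fallback_doms' partition, as described above.
 */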

static int num_cpus_frozen;	/* used to mark begin/end of suspend/resume */

/*
 * Update cpusets according to cpu_active mask.  If cpusets are
 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
 * around partition_sched_domains().
 *
 * If we come here as part of a suspend/resume, don't touch cpusets because we
 * want to restore them to their original state upon resume anyway.
 */
static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
			     void *hcpu)
6672
{
6673 6674 6675 6676 6677 6678 6679 6680 6681 6682 6683 6684 6685 6686 6687 6688 6689 6690 6691 6692 6693 6694
	switch (action) {
	case CPU_ONLINE_FROZEN:
	case CPU_DOWN_FAILED_FROZEN:

		/*
		 * num_cpus_frozen tracks how many CPUs are involved in suspend
		 * resume sequence. As long as this is not the last online
		 * operation in the resume sequence, just build a single sched
		 * domain, ignoring cpusets.
		 */
		num_cpus_frozen--;
		if (likely(num_cpus_frozen)) {
			partition_sched_domains(1, NULL, NULL);
			break;
		}

		/*
		 * This is the last CPU online operation. So fall through and
		 * restore the original sched domains by considering the
		 * cpuset configurations.
		 */

6695
	case CPU_ONLINE:
6696
	case CPU_DOWN_FAILED:
6697
		cpuset_update_active_cpus(true);
6698
		break;
6699 6700 6701
	default:
		return NOTIFY_DONE;
	}
6702
	return NOTIFY_OK;
6703
}
6704

6705 6706
static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
			       void *hcpu)
6707
{
6708
	switch (action) {
6709
	case CPU_DOWN_PREPARE:
6710
		cpuset_update_active_cpus(false);
6711 6712 6713 6714 6715
		break;
	case CPU_DOWN_PREPARE_FROZEN:
		num_cpus_frozen++;
		partition_sched_domains(1, NULL, NULL);
		break;
6716 6717 6718
	default:
		return NOTIFY_DONE;
	}
6719
	return NOTIFY_OK;
}
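
/*
 * Illustrative example: on a hypothetical 4-CPU machine, suspend generates
 * three CPU_DOWN_PREPARE_FROZEN events, so num_cpus_frozen ends up at 3 and a
 * single sched domain (ignoring cpusets) is used throughout.  On resume, the
 * first two CPU_ONLINE_FROZEN events only decrement the counter; the last one
 * brings it back to 0 and falls through to cpuset_update_active_cpus(true),
 * which restores the original cpuset-defined domains.
 */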

void __init sched_init_smp(void)
{
6724 6725 6726
	cpumask_var_t non_isolated_cpus;

	alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
6727
	alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
6728

6729 6730
	sched_init_numa();

6731 6732 6733 6734 6735
	/*
	 * There's no userspace yet to cause hotplug operations; hence all the
	 * cpu masks are stable and all blatant races in the below code cannot
	 * happen.
	 */
6736
	mutex_lock(&sched_domains_mutex);
6737
	init_sched_domains(cpu_active_mask);
6738 6739 6740
	cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
	if (cpumask_empty(non_isolated_cpus))
		cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
6741
	mutex_unlock(&sched_domains_mutex);
6742

6743
	hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
6744 6745
	hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
	hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
6746

6747
	init_hrtick();
6748 6749

	/* Move init over to a non-isolated CPU */
6750
	if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
6751
		BUG();
I
Ingo Molnar 已提交
6752
	sched_init_granularity();
6753
	free_cpumask_var(non_isolated_cpus);
6754

6755
	init_sched_rt_class();
6756
	init_sched_dl_class();
L
Linus Torvalds 已提交
6757 6758 6759 6760
}
#else
void __init sched_init_smp(void)
{
I
Ingo Molnar 已提交
6761
	sched_init_granularity();
L
Linus Torvalds 已提交
6762 6763 6764
}
#endif /* CONFIG_SMP */

6765 6766
const_debug unsigned int sysctl_timer_migration = 1;

L
Linus Torvalds 已提交
6767 6768 6769 6770 6771 6772 6773
int in_sched_functions(unsigned long addr)
{
	return in_lock_functions(addr) ||
		(addr >= (unsigned long)__sched_text_start
		&& addr < (unsigned long)__sched_text_end);
}

6774
#ifdef CONFIG_CGROUP_SCHED
6775 6776 6777 6778
/*
 * Default task group.
 * Every task in system belongs to this group at bootup.
 */
6779
struct task_group root_task_group;
6780
LIST_HEAD(task_groups);
6781
#endif
P
Peter Zijlstra 已提交
6782

6783
DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
P
Peter Zijlstra 已提交
6784

L
Linus Torvalds 已提交
6785 6786
void __init sched_init(void)
{
I
Ingo Molnar 已提交
6787
	int i, j;
6788 6789 6790 6791 6792 6793 6794
	unsigned long alloc_size = 0, ptr;

#ifdef CONFIG_FAIR_GROUP_SCHED
	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
6795
#endif
6796
#ifdef CONFIG_CPUMASK_OFFSTACK
6797
	alloc_size += num_possible_cpus() * cpumask_size();
6798 6799
#endif
	if (alloc_size) {
6800
		ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
6801 6802

#ifdef CONFIG_FAIR_GROUP_SCHED
6803
		root_task_group.se = (struct sched_entity **)ptr;
6804 6805
		ptr += nr_cpu_ids * sizeof(void **);

6806
		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
6807
		ptr += nr_cpu_ids * sizeof(void **);
6808

6809
#endif /* CONFIG_FAIR_GROUP_SCHED */
6810
#ifdef CONFIG_RT_GROUP_SCHED
6811
		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
6812 6813
		ptr += nr_cpu_ids * sizeof(void **);

6814
		root_task_group.rt_rq = (struct rt_rq **)ptr;
6815 6816
		ptr += nr_cpu_ids * sizeof(void **);

6817
#endif /* CONFIG_RT_GROUP_SCHED */
6818 6819
#ifdef CONFIG_CPUMASK_OFFSTACK
		for_each_possible_cpu(i) {
6820
			per_cpu(load_balance_mask, i) = (void *)ptr;
6821 6822 6823
			ptr += cpumask_size();
		}
#endif /* CONFIG_CPUMASK_OFFSTACK */
6824
	}
I
Ingo Molnar 已提交
6825

6826 6827 6828
	init_rt_bandwidth(&def_rt_bandwidth,
			global_rt_period(), global_rt_runtime());
	init_dl_bandwidth(&def_dl_bandwidth,
6829
			global_rt_period(), global_rt_runtime());
6830

G
Gregory Haskins 已提交
6831 6832 6833 6834
#ifdef CONFIG_SMP
	init_defrootdomain();
#endif

6835
#ifdef CONFIG_RT_GROUP_SCHED
6836
	init_rt_bandwidth(&root_task_group.rt_bandwidth,
6837
			global_rt_period(), global_rt_runtime());
6838
#endif /* CONFIG_RT_GROUP_SCHED */
6839

D
Dhaval Giani 已提交
6840
#ifdef CONFIG_CGROUP_SCHED
6841 6842
	list_add(&root_task_group.list, &task_groups);
	INIT_LIST_HEAD(&root_task_group.children);
6843
	INIT_LIST_HEAD(&root_task_group.siblings);
6844
	autogroup_init(&init_task);
6845

D
Dhaval Giani 已提交
6846
#endif /* CONFIG_CGROUP_SCHED */
P
Peter Zijlstra 已提交
6847

6848
	for_each_possible_cpu(i) {
6849
		struct rq *rq;
L
Linus Torvalds 已提交
6850 6851

		rq = cpu_rq(i);
6852
		raw_spin_lock_init(&rq->lock);
N
Nick Piggin 已提交
6853
		rq->nr_running = 0;
6854 6855
		rq->calc_load_active = 0;
		rq->calc_load_update = jiffies + LOAD_FREQ;
6856
		init_cfs_rq(&rq->cfs);
P
Peter Zijlstra 已提交
6857
		init_rt_rq(&rq->rt, rq);
6858
		init_dl_rq(&rq->dl, rq);
I
Ingo Molnar 已提交
6859
#ifdef CONFIG_FAIR_GROUP_SCHED
6860
		root_task_group.shares = ROOT_TASK_GROUP_LOAD;
P
Peter Zijlstra 已提交
6861
		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
		/*
		 * How much cpu bandwidth does root_task_group get?
		 *
		 * In case of task-groups formed through the cgroup filesystem,
		 * it gets 100% of the cpu resources in the system. This overall
		 * system cpu resource is divided among the tasks of
		 * root_task_group and its child task-groups in a fair manner,
		 * based on each entity's (task or task-group's) weight
		 * (se->load.weight).
		 *
		 * In other words, if root_task_group has 10 tasks of weight
		 * 1024 and two child groups A0 and A1 (of weight 1024 each),
		 * then A0's share of the cpu resource is:
		 *
		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
		 *
		 * We achieve this by letting root_task_group's tasks sit
		 * directly in rq->cfs (i.e. root_task_group->se[] = NULL).
		 */
		init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
6882
		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
D
Dhaval Giani 已提交
6883 6884 6885
#endif /* CONFIG_FAIR_GROUP_SCHED */

		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
6886
#ifdef CONFIG_RT_GROUP_SCHED
6887
		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
I
Ingo Molnar 已提交
6888
#endif
L
Linus Torvalds 已提交
6889

I
Ingo Molnar 已提交
6890 6891
		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
			rq->cpu_load[j] = 0;
6892 6893 6894

		rq->last_load_update_tick = jiffies;

L
Linus Torvalds 已提交
6895
#ifdef CONFIG_SMP
N
Nick Piggin 已提交
6896
		rq->sd = NULL;
G
Gregory Haskins 已提交
6897
		rq->rd = NULL;
6898
		rq->cpu_power = SCHED_POWER_SCALE;
6899
		rq->post_schedule = 0;
L
Linus Torvalds 已提交
6900
		rq->active_balance = 0;
I
Ingo Molnar 已提交
6901
		rq->next_balance = jiffies;
L
Linus Torvalds 已提交
6902
		rq->push_cpu = 0;
6903
		rq->cpu = i;
6904
		rq->online = 0;
6905 6906
		rq->idle_stamp = 0;
		rq->avg_idle = 2*sysctl_sched_migration_cost;
6907
		rq->max_idle_balance_cost = sysctl_sched_migration_cost;
6908 6909 6910

		INIT_LIST_HEAD(&rq->cfs_tasks);

6911
		rq_attach_root(rq, &def_root_domain);
6912
#ifdef CONFIG_NO_HZ_COMMON
6913
		rq->nohz_flags = 0;
6914
#endif
6915 6916 6917
#ifdef CONFIG_NO_HZ_FULL
		rq->last_sched_tick = 0;
#endif
L
Linus Torvalds 已提交
6918
#endif
P
Peter Zijlstra 已提交
6919
		init_rq_hrtick(rq);
L
Linus Torvalds 已提交
6920 6921 6922
		atomic_set(&rq->nr_iowait, 0);
	}

6923
	set_load_weight(&init_task);
6924

6925 6926 6927 6928
#ifdef CONFIG_PREEMPT_NOTIFIERS
	INIT_HLIST_HEAD(&init_task.preempt_notifiers);
#endif

L
Linus Torvalds 已提交
6929 6930 6931 6932 6933 6934 6935 6936 6937 6938 6939 6940 6941
	/*
	 * The boot idle thread does lazy MMU switching as well:
	 */
	atomic_inc(&init_mm.mm_count);
	enter_lazy_tlb(&init_mm, current);

	/*
	 * Make us the idle thread. Technically, schedule() should not be
	 * called from this thread, however somewhere below it might be,
	 * but because we are the idle thread, we just pick up running again
	 * when this runqueue becomes "idle".
	 */
	init_idle(current, smp_processor_id());
6942 6943 6944

	calc_load_update = jiffies + LOAD_FREQ;

I
Ingo Molnar 已提交
6945 6946 6947 6948
	/*
	 * During early bootup we pretend to be a normal task:
	 */
	current->sched_class = &fair_sched_class;
6949

6950
#ifdef CONFIG_SMP
6951
	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
R
Rusty Russell 已提交
6952 6953 6954
	/* May be allocated at isolcpus cmdline parse time */
	if (cpu_isolated_map == NULL)
		zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
6955
	idle_thread_set_boot_cpu();
6956 6957
#endif
	init_sched_fair_class();
6958

6959
	scheduler_running = 1;
L
Linus Torvalds 已提交
6960 6961
}

6962
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
6963 6964
static inline int preempt_count_equals(int preempt_offset)
{
6965
	int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
6966

A
Arnd Bergmann 已提交
6967
	return (nested == preempt_offset);
6968 6969
}

6970
void __might_sleep(const char *file, int line, int preempt_offset)
L
Linus Torvalds 已提交
6971 6972 6973
{
	static unsigned long prev_jiffy;	/* ratelimiting */

6974
	rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
6975 6976
	if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
	     !is_idle_task(current)) ||
6977
	    system_state != SYSTEM_RUNNING || oops_in_progress)
I
Ingo Molnar 已提交
6978 6979 6980 6981 6982
		return;
	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
		return;
	prev_jiffy = jiffies;

P
Peter Zijlstra 已提交
6983 6984 6985 6986 6987 6988 6989
	printk(KERN_ERR
		"BUG: sleeping function called from invalid context at %s:%d\n",
			file, line);
	printk(KERN_ERR
		"in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
			in_atomic(), irqs_disabled(),
			current->pid, current->comm);
I
Ingo Molnar 已提交
6990 6991 6992 6993

	debug_show_held_locks(current);
	if (irqs_disabled())
		print_irqtrace_events(current);
6994 6995 6996 6997 6998 6999 7000
#ifdef CONFIG_DEBUG_PREEMPT
	if (!preempt_count_equals(preempt_offset)) {
		pr_err("Preemption disabled at:");
		print_ip_sym(current->preempt_disable_ip);
		pr_cont("\n");
	}
#endif
I
Ingo Molnar 已提交
7001
	dump_stack();
L
Linus Torvalds 已提交
7002 7003 7004 7005 7006
}
EXPORT_SYMBOL(__might_sleep);
#endif

#ifdef CONFIG_MAGIC_SYSRQ
7007 7008
static void normalize_task(struct rq *rq, struct task_struct *p)
{
P
Peter Zijlstra 已提交
7009
	const struct sched_class *prev_class = p->sched_class;
7010 7011 7012
	struct sched_attr attr = {
		.sched_policy = SCHED_NORMAL,
	};
P
Peter Zijlstra 已提交
7013
	int old_prio = p->prio;
7014
	int on_rq;
7015

P
Peter Zijlstra 已提交
7016
	on_rq = p->on_rq;
7017
	if (on_rq)
7018
		dequeue_task(rq, p, 0);
7019
	__setscheduler(rq, p, &attr);
7020
	if (on_rq) {
7021
		enqueue_task(rq, p, 0);
7022 7023
		resched_task(rq->curr);
	}
P
Peter Zijlstra 已提交
7024 7025

	check_class_changed(rq, p, prev_class, old_prio);
7026 7027
}

L
Linus Torvalds 已提交
7028 7029
void normalize_rt_tasks(void)
{
7030
	struct task_struct *g, *p;
L
Linus Torvalds 已提交
7031
	unsigned long flags;
7032
	struct rq *rq;
L
Linus Torvalds 已提交
7033

7034
	read_lock_irqsave(&tasklist_lock, flags);
7035
	do_each_thread(g, p) {
7036 7037 7038 7039 7040 7041
		/*
		 * Only normalize user tasks:
		 */
		if (!p->mm)
			continue;

I
Ingo Molnar 已提交
7042 7043
		p->se.exec_start		= 0;
#ifdef CONFIG_SCHEDSTATS
7044 7045 7046
		p->se.statistics.wait_start	= 0;
		p->se.statistics.sleep_start	= 0;
		p->se.statistics.block_start	= 0;
I
Ingo Molnar 已提交
7047
#endif
I
Ingo Molnar 已提交
7048

7049
		if (!dl_task(p) && !rt_task(p)) {
I
Ingo Molnar 已提交
7050 7051 7052 7053
			/*
			 * Renice negative nice level userspace
			 * tasks back to 0:
			 */
7054
			if (task_nice(p) < 0 && p->mm)
I
Ingo Molnar 已提交
7055
				set_user_nice(p, 0);
L
Linus Torvalds 已提交
7056
			continue;
I
Ingo Molnar 已提交
7057
		}
L
Linus Torvalds 已提交
7058

7059
		raw_spin_lock(&p->pi_lock);
7060
		rq = __task_rq_lock(p);
L
Linus Torvalds 已提交
7061

7062
		normalize_task(rq, p);
7063

7064
		__task_rq_unlock(rq);
7065
		raw_spin_unlock(&p->pi_lock);
7066 7067
	} while_each_thread(g, p);

7068
	read_unlock_irqrestore(&tasklist_lock, flags);
L
Linus Torvalds 已提交
7069 7070 7071
}

#endif /* CONFIG_MAGIC_SYSRQ */
7072

7073
#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
7074
/*
7075
 * These functions are only useful for the IA64 MCA handling, or kdb.
7076 7077 7078 7079 7080 7081 7082 7083 7084 7085 7086 7087 7088
 *
 * They can only be called when the whole system has been
 * stopped - every CPU needs to be quiescent, and no scheduling
 * activity can take place. Using them for anything else would
 * be a serious bug, and as a result, they aren't even visible
 * under any other configuration.
 */

/**
 * curr_task - return the current task for a given cpu.
 * @cpu: the processor in question.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7089 7090
 *
 * Return: The current task for @cpu.
7091
 */
7092
struct task_struct *curr_task(int cpu)
7093 7094 7095 7096
{
	return cpu_curr(cpu);
}

7097 7098 7099
#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */

#ifdef CONFIG_IA64
7100 7101 7102 7103 7104 7105
/**
 * set_curr_task - set the current task for a given cpu.
 * @cpu: the processor in question.
 * @p: the task pointer to set.
 *
 * Description: This function must only be used when non-maskable interrupts
I
Ingo Molnar 已提交
7106 7107
 * are serviced on a separate stack. It allows the architecture to switch the
 * notion of the current task on a cpu in a non-blocking manner. This function
 * must be called with all CPUs synchronized and interrupts disabled; the
 * caller must save the original value of the current task (see
 * curr_task() above) and restore that value before re-enabling interrupts and
 * re-starting the system.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 */
7115
void set_curr_task(int cpu, struct task_struct *p)
7116 7117 7118 7119 7120
{
	cpu_curr(cpu) = p;
}

#endif
S
Srivatsa Vaddagiri 已提交
7121

D
Dhaval Giani 已提交
7122
#ifdef CONFIG_CGROUP_SCHED
7123 7124 7125
/* task_group_lock serializes the addition/removal of task groups */
static DEFINE_SPINLOCK(task_group_lock);

7126 7127 7128 7129
static void free_sched_group(struct task_group *tg)
{
	free_fair_sched_group(tg);
	free_rt_sched_group(tg);
7130
	autogroup_free(tg);
7131 7132 7133 7134
	kfree(tg);
}

/* allocate runqueue etc for a new task group */
7135
struct task_group *sched_create_group(struct task_group *parent)
7136 7137 7138 7139 7140 7141 7142
{
	struct task_group *tg;

	tg = kzalloc(sizeof(*tg), GFP_KERNEL);
	if (!tg)
		return ERR_PTR(-ENOMEM);

7143
	if (!alloc_fair_sched_group(tg, parent))
7144 7145
		goto err;

7146
	if (!alloc_rt_sched_group(tg, parent))
7147 7148
		goto err;

7149 7150 7151 7152 7153 7154 7155 7156 7157 7158 7159
	return tg;

err:
	free_sched_group(tg);
	return ERR_PTR(-ENOMEM);
}

void sched_online_group(struct task_group *tg, struct task_group *parent)
{
	unsigned long flags;

7160
	spin_lock_irqsave(&task_group_lock, flags);
P
Peter Zijlstra 已提交
7161
	list_add_rcu(&tg->list, &task_groups);
P
Peter Zijlstra 已提交
7162 7163 7164 7165 7166

	WARN_ON(!parent); /* root should already exist */

	tg->parent = parent;
	INIT_LIST_HEAD(&tg->children);
7167
	list_add_rcu(&tg->siblings, &parent->children);
7168
	spin_unlock_irqrestore(&task_group_lock, flags);
S
Srivatsa Vaddagiri 已提交
7169 7170
}

7171
/* rcu callback to free various structures associated with a task group */
P
Peter Zijlstra 已提交
7172
static void free_sched_group_rcu(struct rcu_head *rhp)
S
Srivatsa Vaddagiri 已提交
7173 7174
{
	/* now it should be safe to free those cfs_rqs */
P
Peter Zijlstra 已提交
7175
	free_sched_group(container_of(rhp, struct task_group, rcu));
S
Srivatsa Vaddagiri 已提交
7176 7177
}

7178
/* Destroy runqueue etc associated with a task group */
7179
void sched_destroy_group(struct task_group *tg)
{
	/* wait for possible concurrent references to cfs_rqs to complete */
	call_rcu(&tg->rcu, free_sched_group_rcu);
}

void sched_offline_group(struct task_group *tg)
{
	unsigned long flags;
	int i;

	/* end participation in shares distribution */
	for_each_possible_cpu(i)
		unregister_fair_sched_group(tg, i);

	spin_lock_irqsave(&task_group_lock, flags);
	list_del_rcu(&tg->list);
	list_del_rcu(&tg->siblings);
	spin_unlock_irqrestore(&task_group_lock, flags);
}

/* Change a task's runqueue when it moves between groups.
 *	The caller of this function should have put the task in its new group
 *	by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
 *	reflect its new group.
 */
void sched_move_task(struct task_struct *tsk)
{
	struct task_group *tg;
	int on_rq, running;
	unsigned long flags;
	struct rq *rq;

	rq = task_rq_lock(tsk, &flags);

	running = task_current(rq, tsk);
	on_rq = tsk->on_rq;

	if (on_rq)
		dequeue_task(rq, tsk, 0);
	if (unlikely(running))
		tsk->sched_class->put_prev_task(rq, tsk);

	tg = container_of(task_css_check(tsk, cpu_cgrp_id,
				lockdep_is_held(&tsk->sighand->siglock)),
			  struct task_group, css);
	tg = autogroup_task_group(tsk, tg);
	tsk->sched_task_group = tg;

#ifdef CONFIG_FAIR_GROUP_SCHED
	if (tsk->sched_class->task_move_group)
		tsk->sched_class->task_move_group(tsk, on_rq);
	else
#endif
		set_task_rq(tsk, task_cpu(tsk));

	if (unlikely(running))
		tsk->sched_class->set_curr_task(rq);
	if (on_rq)
		enqueue_task(rq, tsk, 0);

	task_rq_unlock(rq, tsk, &flags);
}
#endif /* CONFIG_CGROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
/*
 * Ensure that the real time constraints are schedulable.
 */
static DEFINE_MUTEX(rt_constraints_mutex);

/* Must be called with tasklist_lock held */
static inline int tg_has_rt_tasks(struct task_group *tg)
{
	struct task_struct *g, *p;

	do_each_thread(g, p) {
		if (rt_task(p) && task_rq(p)->rt.tg == tg)
			return 1;
	} while_each_thread(g, p);

	return 0;
}

struct rt_schedulable_data {
	struct task_group *tg;
	u64 rt_period;
	u64 rt_runtime;
};

static int tg_rt_schedulable(struct task_group *tg, void *data)
{
	struct rt_schedulable_data *d = data;
	struct task_group *child;
	unsigned long total, sum = 0;
	u64 period, runtime;

	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
	runtime = tg->rt_bandwidth.rt_runtime;

	if (tg == d->tg) {
		period = d->rt_period;
		runtime = d->rt_runtime;
	}

	/*
	 * Cannot have more runtime than the period.
	 */
	if (runtime > period && runtime != RUNTIME_INF)
		return -EINVAL;

	/*
	 * Ensure we don't starve existing RT tasks.
	 */
	if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
		return -EBUSY;

	total = to_ratio(period, runtime);

	/*
	 * Nobody can have more than the global setting allows.
	 */
	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
		return -EINVAL;

	/*
	 * The sum of our children's runtime should not exceed our own.
	 */
	list_for_each_entry_rcu(child, &tg->children, siblings) {
		period = ktime_to_ns(child->rt_bandwidth.rt_period);
		runtime = child->rt_bandwidth.rt_runtime;

		if (child == d->tg) {
			period = d->rt_period;
			runtime = d->rt_runtime;
		}

		sum += to_ratio(period, runtime);
	}

	if (sum > total)
		return -EINVAL;

	return 0;
}

static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
{
	int ret;

	struct rt_schedulable_data data = {
		.tg = tg,
		.rt_period = period,
		.rt_runtime = runtime,
	};

	rcu_read_lock();
	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
	rcu_read_unlock();

	return ret;
}
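
/*
 * Worked example (illustrative, not part of the original file): with the
 * default global settings of sched_rt_period = 1000000us and
 * sched_rt_runtime = 950000us, the global ratio is 0.95.  A group asking
 * for 500000us of runtime per 1000000us period has a ratio of 0.5, which
 * passes the global check; but if its children together already request
 * 300000us + 300000us per 1000000us (a summed ratio of 0.6), the tree walk
 * above rejects the change with -EINVAL because the children would exceed
 * their parent's allocation.
 */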

static int tg_set_rt_bandwidth(struct task_group *tg,
		u64 rt_period, u64 rt_runtime)
{
	int i, err = 0;

	mutex_lock(&rt_constraints_mutex);
	read_lock(&tasklist_lock);
	err = __rt_schedulable(tg, rt_period, rt_runtime);
	if (err)
		goto unlock;

	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
	tg->rt_bandwidth.rt_runtime = rt_runtime;

	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = tg->rt_rq[i];

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_runtime;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
	read_unlock(&tasklist_lock);
	mutex_unlock(&rt_constraints_mutex);

	return err;
}

static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
{
	u64 rt_runtime, rt_period;

	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
	if (rt_runtime_us < 0)
		rt_runtime = RUNTIME_INF;

	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}

static long sched_group_rt_runtime(struct task_group *tg)
{
	u64 rt_runtime_us;

	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
		return -1;

	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
	do_div(rt_runtime_us, NSEC_PER_USEC);
	return rt_runtime_us;
}

static int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
{
	u64 rt_runtime, rt_period;

	rt_period = (u64)rt_period_us * NSEC_PER_USEC;
	rt_runtime = tg->rt_bandwidth.rt_runtime;

	if (rt_period == 0)
		return -EINVAL;

	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}

static long sched_group_rt_period(struct task_group *tg)
{
	u64 rt_period_us;

	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
	do_div(rt_period_us, NSEC_PER_USEC);
	return rt_period_us;
}
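
/*
 * Usage note (illustrative, not part of the original file): these helpers
 * back the "rt_runtime_us" and "rt_period_us" entries in cpu_files[] below.
 * For example, writing 300000 to a group's rt_runtime_us while its
 * rt_period_us stays at 1000000 allows the group's realtime tasks to consume
 * at most 30% of each period; writing a negative runtime maps to RUNTIME_INF
 * (no group-local limit, still subject to the global caps).
 */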
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
static int sched_rt_global_constraints(void)
{
	int ret = 0;

	mutex_lock(&rt_constraints_mutex);
	read_lock(&tasklist_lock);
	ret = __rt_schedulable(NULL, 0, 0);
	read_unlock(&tasklist_lock);
	mutex_unlock(&rt_constraints_mutex);

	return ret;
}

static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
{
	/* Don't accept realtime tasks when there is no way for them to run */
	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
		return 0;

	return 1;
}

#else /* !CONFIG_RT_GROUP_SCHED */
static int sched_rt_global_constraints(void)
{
	unsigned long flags;
	int i, ret = 0;

	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = &cpu_rq(i)->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = global_rt_runtime();
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);

	return ret;
}
#endif /* CONFIG_RT_GROUP_SCHED */

static int sched_dl_global_constraints(void)
{
	u64 runtime = global_rt_runtime();
	u64 period = global_rt_period();
	u64 new_bw = to_ratio(period, runtime);
	int cpu, ret = 0;
	unsigned long flags;

	/*
	 * Here we want to check that the new bandwidth is not being set to a
	 * value smaller than the bandwidth currently allocated in
	 * any of the root_domains.
	 *
	 * FIXME: Cycling over all the CPUs is overkill, but simpler than
	 * cycling on root_domains... Discussion on different/better
	 * solutions is welcome!
	 */
	for_each_possible_cpu(cpu) {
		struct dl_bw *dl_b = dl_bw_of(cpu);

		raw_spin_lock_irqsave(&dl_b->lock, flags);
		if (new_bw < dl_b->total_bw)
			ret = -EBUSY;
		raw_spin_unlock_irqrestore(&dl_b->lock, flags);

		if (ret)
			break;
	}

	return ret;
}
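
/*
 * Worked example (illustrative, not part of the original file): suppose a
 * root domain has already admitted SCHED_DEADLINE tasks totalling roughly
 * half a CPU (dl_b->total_bw ~ 0.5).  Lowering the global limit to
 * runtime = 400000us per period = 1000000us makes new_bw ~ 0.4, which is
 * below what is already reserved, so the write is refused with -EBUSY.
 */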

static void sched_dl_do_global(void)
{
	u64 new_bw = -1;
	int cpu;
	unsigned long flags;

	def_dl_bandwidth.dl_period = global_rt_period();
	def_dl_bandwidth.dl_runtime = global_rt_runtime();

	if (global_rt_runtime() != RUNTIME_INF)
		new_bw = to_ratio(global_rt_period(), global_rt_runtime());

	/*
	 * FIXME: As above...
	 */
	for_each_possible_cpu(cpu) {
		struct dl_bw *dl_b = dl_bw_of(cpu);

		raw_spin_lock_irqsave(&dl_b->lock, flags);
		dl_b->bw = new_bw;
		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
	}
}

static int sched_rt_global_validate(void)
{
	if (sysctl_sched_rt_period <= 0)
		return -EINVAL;

	if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
		(sysctl_sched_rt_runtime > sysctl_sched_rt_period))
		return -EINVAL;

	return 0;
}

static void sched_rt_do_global(void)
{
	def_rt_bandwidth.rt_runtime = global_rt_runtime();
	def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
}

int sched_rt_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int old_period, old_runtime;
	static DEFINE_MUTEX(mutex);
	int ret;

	mutex_lock(&mutex);
	old_period = sysctl_sched_rt_period;
	old_runtime = sysctl_sched_rt_runtime;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (!ret && write) {
		ret = sched_rt_global_validate();
		if (ret)
			goto undo;

		ret = sched_rt_global_constraints();
		if (ret)
			goto undo;

		ret = sched_dl_global_constraints();
		if (ret)
			goto undo;

		sched_rt_do_global();
		sched_dl_do_global();
	}
	if (0) {
undo:
		sysctl_sched_rt_period = old_period;
		sysctl_sched_rt_runtime = old_runtime;
	}
	mutex_unlock(&mutex);

	return ret;
}
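
/*
 * Usage note (illustrative, not part of the original file): this handler
 * runs when the sched_rt_period_us / sched_rt_runtime_us sysctls are
 * written (via proc_dointvec() above).  E.g. lowering the runtime from the
 * default 950000 to 900000, with the period left at 1000000, first passes
 * sched_rt_global_validate(), then must satisfy both the RT and DL
 * constraint checks before sched_rt_do_global() and sched_dl_do_global()
 * push the new values into every runqueue and root domain; any failure
 * rolls the sysctl values back in the "undo" branch.
 */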

int sched_rr_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;
	static DEFINE_MUTEX(mutex);

	mutex_lock(&mutex);
	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	/* make sure that internally we keep jiffies */
	/* also, writing zero resets timeslice to default */
	if (!ret && write) {
		sched_rr_timeslice = sched_rr_timeslice <= 0 ?
			RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
	}
	mutex_unlock(&mutex);
	return ret;
}
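
/*
 * Usage note (illustrative, not part of the original file): the handler
 * above keeps the round-robin timeslice in jiffies internally.  Writing,
 * say, 30 through the corresponding sysctl yields a 30ms slice (converted
 * with msecs_to_jiffies()), while writing 0 or a negative value falls back
 * to the RR_TIMESLICE default.
 */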

#ifdef CONFIG_CGROUP_SCHED

static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct task_group, css) : NULL;
}

static struct cgroup_subsys_state *
cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct task_group *parent = css_tg(parent_css);
	struct task_group *tg;

	if (!parent) {
		/* This is early initialization for the top cgroup */
		return &root_task_group.css;
	}

	tg = sched_create_group(parent);
	if (IS_ERR(tg))
		return ERR_PTR(-ENOMEM);

	return &tg->css;
}

static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
{
	struct task_group *tg = css_tg(css);
	struct task_group *parent = css_tg(css_parent(css));

	if (parent)
		sched_online_group(tg, parent);
	return 0;
}

static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct task_group *tg = css_tg(css);

	sched_destroy_group(tg);
}

static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct task_group *tg = css_tg(css);

	sched_offline_group(tg);
}

static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
				 struct cgroup_taskset *tset)
{
	struct task_struct *task;

	cgroup_taskset_for_each(task, tset) {
#ifdef CONFIG_RT_GROUP_SCHED
		if (!sched_rt_can_attach(css_tg(css), task))
			return -EINVAL;
#else
		/* We don't support RT-tasks being in separate groups */
		if (task->sched_class != &fair_sched_class)
			return -EINVAL;
#endif
	}
	return 0;
}

static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
			      struct cgroup_taskset *tset)
{
	struct task_struct *task;

	cgroup_taskset_for_each(task, tset)
		sched_move_task(task);
}

static void cpu_cgroup_exit(struct cgroup_subsys_state *css,
			    struct cgroup_subsys_state *old_css,
			    struct task_struct *task)
{
	/*
	 * cgroup_exit() is called in the copy_process() failure path.
	 * Ignore this case since the task hasn't run yet; this avoids
	 * trying to poke at a half-freed task state from generic code.
	 */
	if (!(task->flags & PF_EXITING))
		return;

	sched_move_task(task);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
				struct cftype *cftype, u64 shareval)
{
	return sched_group_set_shares(css_tg(css), scale_load(shareval));
}

static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	struct task_group *tg = css_tg(css);

	return (u64) scale_load_down(tg->shares);
}

#ifdef CONFIG_CFS_BANDWIDTH
static DEFINE_MUTEX(cfs_constraints_mutex);

const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */

static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);

static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
{
	int i, ret = 0, runtime_enabled, runtime_was_enabled;
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;

	if (tg == &root_task_group)
		return -EINVAL;

	/*
	 * Ensure we have at least some amount of bandwidth every period.  This is
	 * to prevent reaching a state of large arrears when throttled via
	 * entity_tick() resulting in prolonged exit starvation.
	 */
	if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
		return -EINVAL;

	/*
	 * Likewise, bound things on the other side by preventing insane quota
	 * periods.  This also allows us to normalize in computing quota
	 * feasibility.
	 */
	if (period > max_cfs_quota_period)
		return -EINVAL;

	mutex_lock(&cfs_constraints_mutex);
	ret = __cfs_schedulable(tg, period, quota);
	if (ret)
		goto out_unlock;

	runtime_enabled = quota != RUNTIME_INF;
	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
	/*
	 * If we need to toggle cfs_bandwidth_used, off->on must occur
	 * before making related changes, and on->off must occur afterwards
	 */
	if (runtime_enabled && !runtime_was_enabled)
		cfs_bandwidth_usage_inc();
	raw_spin_lock_irq(&cfs_b->lock);
	cfs_b->period = ns_to_ktime(period);
	cfs_b->quota = quota;

	__refill_cfs_bandwidth_runtime(cfs_b);
	/* restart the period timer (if active) to handle new period expiry */
	if (runtime_enabled && cfs_b->timer_active) {
		/* force a reprogram */
		cfs_b->timer_active = 0;
		__start_cfs_bandwidth(cfs_b);
	}
	raw_spin_unlock_irq(&cfs_b->lock);

	for_each_possible_cpu(i) {
		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
		struct rq *rq = cfs_rq->rq;

		raw_spin_lock_irq(&rq->lock);
		cfs_rq->runtime_enabled = runtime_enabled;
		cfs_rq->runtime_remaining = 0;

		if (cfs_rq->throttled)
			unthrottle_cfs_rq(cfs_rq);
		raw_spin_unlock_irq(&rq->lock);
	}
	if (runtime_was_enabled && !runtime_enabled)
		cfs_bandwidth_usage_dec();
out_unlock:
	mutex_unlock(&cfs_constraints_mutex);

	return ret;
}

int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
{
	u64 quota, period;

	period = ktime_to_ns(tg->cfs_bandwidth.period);
	if (cfs_quota_us < 0)
		quota = RUNTIME_INF;
	else
		quota = (u64)cfs_quota_us * NSEC_PER_USEC;

	return tg_set_cfs_bandwidth(tg, period, quota);
}

long tg_get_cfs_quota(struct task_group *tg)
{
	u64 quota_us;

	if (tg->cfs_bandwidth.quota == RUNTIME_INF)
		return -1;

	quota_us = tg->cfs_bandwidth.quota;
	do_div(quota_us, NSEC_PER_USEC);

	return quota_us;
}

int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
{
	u64 quota, period;

	period = (u64)cfs_period_us * NSEC_PER_USEC;
	quota = tg->cfs_bandwidth.quota;

	return tg_set_cfs_bandwidth(tg, period, quota);
}

long tg_get_cfs_period(struct task_group *tg)
{
	u64 cfs_period_us;

	cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
	do_div(cfs_period_us, NSEC_PER_USEC);

	return cfs_period_us;
}
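
/*
 * Illustrative sketch (not part of the original file): capping a group at
 * half a CPU.  A quota of 50000us against a 100000us period lets the
 * group's CFS tasks run for at most 50ms of every 100ms before they are
 * throttled; these are the values a user would write to the cfs_period_us
 * and cfs_quota_us files declared in cpu_files[] below.
 */
#if 0
static int example_cap_group_to_half_cpu(struct task_group *tg)
{
	int ret = tg_set_cfs_period(tg, 100000);	/* 100ms period */

	if (ret)
		return ret;

	return tg_set_cfs_quota(tg, 50000);		/* 50ms quota per period */
}
#endif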

static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
				  struct cftype *cft)
{
	return tg_get_cfs_quota(css_tg(css));
}

static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
				   struct cftype *cftype, s64 cfs_quota_us)
{
	return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
}

static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	return tg_get_cfs_period(css_tg(css));
}

static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
				    struct cftype *cftype, u64 cfs_period_us)
{
	return tg_set_cfs_period(css_tg(css), cfs_period_us);
}

struct cfs_schedulable_data {
	struct task_group *tg;
	u64 period, quota;
};

/*
 * normalize group quota/period to be quota/max_period
 * note: units are usecs
 */
static u64 normalize_cfs_quota(struct task_group *tg,
			       struct cfs_schedulable_data *d)
{
	u64 quota, period;

	if (tg == d->tg) {
		period = d->period;
		quota = d->quota;
	} else {
		period = tg_get_cfs_period(tg);
		quota = tg_get_cfs_quota(tg);
	}

	/* note: these should typically be equivalent */
	if (quota == RUNTIME_INF || quota == -1)
		return RUNTIME_INF;

	return to_ratio(period, quota);
}

static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
{
	struct cfs_schedulable_data *d = data;
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
	s64 quota = 0, parent_quota = -1;

	if (!tg->parent) {
		quota = RUNTIME_INF;
	} else {
		struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;

		quota = normalize_cfs_quota(tg, d);
		parent_quota = parent_b->hierarchal_quota;

		/*
		 * ensure max(child_quota) <= parent_quota, inherit when no
		 * limit is set
		 */
		if (quota == RUNTIME_INF)
			quota = parent_quota;
		else if (parent_quota != RUNTIME_INF && quota > parent_quota)
			return -EINVAL;
	}
	cfs_b->hierarchal_quota = quota;

	return 0;
}

static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
{
	int ret;
	struct cfs_schedulable_data data = {
		.tg = tg,
		.period = period,
		.quota = quota,
	};

	if (quota != RUNTIME_INF) {
		do_div(data.period, NSEC_PER_USEC);
		do_div(data.quota, NSEC_PER_USEC);
	}

	rcu_read_lock();
	ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
	rcu_read_unlock();

	return ret;
}
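
/*
 * Worked example (illustrative, not part of the original file): a parent
 * group with quota = period = 100000us normalizes to a ratio of 1.0.  A
 * child that sets quota = 200000us over the same 100000us period would
 * normalize to 2.0 > 1.0 and the walk above returns -EINVAL; a child that
 * leaves its quota at RUNTIME_INF simply inherits the parent's 1.0 for the
 * purpose of checking its own children.
 */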

static int cpu_stats_show(struct seq_file *sf, void *v)
{
	struct task_group *tg = css_tg(seq_css(sf));
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;

	seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
	seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
	seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);

	return 0;
}
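
/*
 * Example output (illustrative, not part of the original file): reading the
 * "stat" file declared in cpu_files[] below yields three lines in the
 * format emitted above, e.g.:
 *
 *	nr_periods 520
 *	nr_throttled 13
 *	throttled_time 214003326
 *
 * where throttled_time is reported in nanoseconds.
 */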
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
				struct cftype *cft, s64 val)
{
	return sched_group_set_rt_runtime(css_tg(css), val);
}

static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	return sched_group_rt_runtime(css_tg(css));
}

static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
				    struct cftype *cftype, u64 rt_period_us)
{
	return sched_group_set_rt_period(css_tg(css), rt_period_us);
}

static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	return sched_group_rt_period(css_tg(css));
}
#endif /* CONFIG_RT_GROUP_SCHED */

static struct cftype cpu_files[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	{
		.name = "shares",
		.read_u64 = cpu_shares_read_u64,
		.write_u64 = cpu_shares_write_u64,
	},
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	{
		.name = "cfs_quota_us",
		.read_s64 = cpu_cfs_quota_read_s64,
		.write_s64 = cpu_cfs_quota_write_s64,
	},
	{
		.name = "cfs_period_us",
		.read_u64 = cpu_cfs_period_read_u64,
		.write_u64 = cpu_cfs_period_write_u64,
	},
	{
		.name = "stat",
		.seq_show = cpu_stats_show,
	},
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	{
		.name = "rt_runtime_us",
		.read_s64 = cpu_rt_runtime_read,
		.write_s64 = cpu_rt_runtime_write,
	},
	{
		.name = "rt_period_us",
		.read_u64 = cpu_rt_period_read_uint,
		.write_u64 = cpu_rt_period_write_uint,
	},
#endif
	{ }	/* terminate */
};

struct cgroup_subsys cpu_cgrp_subsys = {
	.css_alloc	= cpu_cgroup_css_alloc,
	.css_free	= cpu_cgroup_css_free,
	.css_online	= cpu_cgroup_css_online,
	.css_offline	= cpu_cgroup_css_offline,
	.can_attach	= cpu_cgroup_can_attach,
	.attach		= cpu_cgroup_attach,
	.exit		= cpu_cgroup_exit,
	.base_cftypes	= cpu_files,
	.early_init	= 1,
};

#endif	/* CONFIG_CGROUP_SCHED */

void dump_cpu_task(int cpu)
{
	pr_info("Task dump for CPU %d:\n", cpu);
	sched_show_task(cpu_curr(cpu));
}