/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/proc_fs.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
#include <linux/mempolicy.h>
#include <linux/debugfs.h>

#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		printk(x);			\
 } while (0)
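
/*
 * Illustrative example (editor's note): SEQ_printf(m, "cpu#%d\n", cpu)
 * writes into the seq_file when a /proc/sched_debug reader supplies m,
 * and falls back to printk() when m is NULL, as on the sysrq path
 * (see sysrq_sched_debug_show() below).
 */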

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
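
/*
 * Worked example (editor's note): for x == 3500000 (3.5 ms in ns),
 * nsec_high(x) == 3 and nsec_low(x) == 500000, so a "%Ld.%06ld" format
 * fed with SPLIT_NS(x) prints "3.500000".
 */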

#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef HAVE_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { }
static void sched_feat_enable(int i) { }
#endif /* HAVE_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (strcmp(cmp, sched_feat_names[i]) == 0) {
			if (neg) {
				sysctl_sched_features &= ~(1UL << i);
				sched_feat_disable(i);
			} else {
				sysctl_sched_features |= (1UL << i);
				sched_feat_enable(i);
			}
			break;
		}
	}

	return i;
}
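
/*
 * Editor's note: on an unrecognized feature name the loop above runs to
 * completion, so sched_feat_set() returns __SCHED_FEAT_NR, which
 * sched_feat_write() below maps to -EINVAL.
 */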

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int i;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	inode_lock(inode);
	i = sched_feat_set(cmp);
	inode_unlock(inode);
	if (i == __SCHED_FEAT_NR)
		return -EINVAL;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
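
/*
 * Usage sketch (editor's note; assumes debugfs is mounted at
 * /sys/kernel/debug and that the feature name exists in features.h):
 *
 *   echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched_features
 *
 * clears the feature's bit in sysctl_sched_features and, under
 * HAVE_JUMP_LABEL, also patches the matching static key.
 */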

__read_mostly bool sched_debug_enabled;

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	debugfs_create_bool("sched_debug", 0644, NULL,
			&sched_debug_enabled);

	return 0;
}
late_initcall(sched_init_debug);
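
/*
 * Editor's note: this initcall creates the "sched_features" file served by
 * sched_feat_fops above, plus a boolean "sched_debug" file backed by
 * sched_debug_enabled, which gates scheduler state dumps elsewhere in the
 * kernel (e.g. the sysrq task dump).
 */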

#ifdef CONFIG_SMP

#ifdef CONFIG_SYSCTL

static struct ctl_table sd_ctl_dir[] = {
	{
		.procname	= "sched_domain",
		.mode		= 0555,
	},
	{}
};

static struct ctl_table sd_ctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= sd_ctl_dir,
	},
	{}
};

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
	struct ctl_table *entry =
		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

	return entry;
}

static void sd_free_ctl_entry(struct ctl_table **tablep)
{
	struct ctl_table *entry;

	/*
	 * In the intermediate directories, both the child directory and
	 * procname are dynamically allocated and could fail but the mode
	 * will always be set. In the lowest directory the names are
	 * static strings and all have proc handlers.
	 */
	for (entry = *tablep; entry->mode; entry++) {
		if (entry->child)
			sd_free_ctl_entry(&entry->child);
		if (entry->proc_handler == NULL)
			kfree(entry->procname);
	}

	kfree(*tablep);
	*tablep = NULL;
}

static int min_load_idx = 0;
static int max_load_idx = CPU_LOAD_IDX_MAX-1;

static void
set_table_entry(struct ctl_table *entry,
		const char *procname, void *data, int maxlen,
		umode_t mode, proc_handler *proc_handler,
		bool load_idx)
{
	entry->procname = procname;
	entry->data = data;
	entry->maxlen = maxlen;
	entry->mode = mode;
	entry->proc_handler = proc_handler;

	if (load_idx) {
		entry->extra1 = &min_load_idx;
		entry->extra2 = &max_load_idx;
	}
}

static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
	struct ctl_table *table = sd_alloc_ctl_entry(14);

	if (table == NULL)
		return NULL;

	set_table_entry(&table[0], "min_interval", &sd->min_interval,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[1], "max_interval", &sd->max_interval,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[9], "cache_nice_tries",
		&sd->cache_nice_tries,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[10], "flags", &sd->flags,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[11], "max_newidle_lb_cost",
		&sd->max_newidle_lb_cost,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[12], "name", sd->name,
		CORENAME_MAX_SIZE, 0444, proc_dostring, false);
	/* &table[13] is terminator */

	return table;
}

static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
	struct ctl_table *entry, *table;
	struct sched_domain *sd;
	int domain_num = 0, i;
	char buf[32];

	for_each_domain(cpu, sd)
		domain_num++;
	entry = table = sd_alloc_ctl_entry(domain_num + 1);
	if (table == NULL)
		return NULL;

	i = 0;
	for_each_domain(cpu, sd) {
		snprintf(buf, 32, "domain%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_domain_table(sd);
		entry++;
		i++;
	}
	return table;
}

static cpumask_var_t sd_sysctl_cpus;
static struct ctl_table_header *sd_sysctl_header;

void register_sched_domain_sysctl(void)
{
	static struct ctl_table *cpu_entries;
	static struct ctl_table **cpu_idx;
	char buf[32];
	int i;

	if (!cpu_entries) {
		cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
		if (!cpu_entries)
			return;

		WARN_ON(sd_ctl_dir[0].child);
		sd_ctl_dir[0].child = cpu_entries;
	}

	if (!cpu_idx) {
		struct ctl_table *e = cpu_entries;

		cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
		if (!cpu_idx)
			return;

		/* deal with sparse possible map */
		for_each_possible_cpu(i) {
			cpu_idx[i] = e;
			e++;
		}
	}

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;

		/* init to possible to not have holes in @cpu_entries */
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	for_each_cpu(i, sd_sysctl_cpus) {
		struct ctl_table *e = cpu_idx[i];

		if (e->child)
			sd_free_ctl_entry(&e->child);

		if (!e->procname) {
			snprintf(buf, 32, "cpu%d", i);
			e->procname = kstrdup(buf, GFP_KERNEL);
		}
		e->mode = 0555;
		e->child = sd_alloc_ctl_cpu_table(i);

		__cpumask_clear_cpu(i, sd_sysctl_cpus);
	}

	WARN_ON(sd_sysctl_header);
	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}
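
/*
 * Resulting sysctl layout (editor's sketch):
 *
 *   /proc/sys/kernel/sched_domain/cpu0/domain0/{min_interval, max_interval,
 *       busy_idx, ..., name}
 *   /proc/sys/kernel/sched_domain/cpu0/domain1/...
 *   /proc/sys/kernel/sched_domain/cpu1/...
 *
 * One domainN directory per sched_domain level of each cpu, each holding
 * the 13 entries filled in by sd_alloc_ctl_domain_table().
 */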

void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

/* may be called multiple times per register */
void unregister_sched_domain_sysctl(void)
{
	unregister_sysctl_table(sd_sysctl_header);
	sd_sysctl_header = NULL;
}
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define P_SCHEDSTAT(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)schedstat_val(F))
#define PN(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);
	if (schedstat_enabled()) {
		PN_SCHEDSTAT(se->statistics.wait_start);
		PN_SCHEDSTAT(se->statistics.sleep_start);
		PN_SCHEDSTAT(se->statistics.block_start);
		PN_SCHEDSTAT(se->statistics.sleep_max);
		PN_SCHEDSTAT(se->statistics.block_max);
		PN_SCHEDSTAT(se->statistics.exec_max);
		PN_SCHEDSTAT(se->statistics.slice_max);
		PN_SCHEDSTAT(se->statistics.wait_max);
		PN_SCHEDSTAT(se->statistics.wait_sum);
		P_SCHEDSTAT(se->statistics.wait_count);
	}
	P(se->load.weight);
	P(se->runnable_weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_load_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
	return group_path;
}
#endif
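
/*
 * Editor's note: group_path is a single static PATH_MAX buffer. The printing
 * paths that reach task_group_path() run under sched_debug_lock (taken in
 * print_cpu() below), which is what serializes use of the shared buffer.
 */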

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	" S           task   PID         tree-key  switches  prio"
	"     wait-time             sum-exec        sum-sleep\n"
	"-------------------------------------------------------"
	"----------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (rb_first_cached(&cfs_rq->tasks_timeline))
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %ld\n", "runnable_weight", cfs_rq->runnable_weight);
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
			cfs_rq->avg.runnable_load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_sum",
			cfs_rq->removed.runnable_sum);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\ndl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

extern __read_mostly int sched_clock_running;

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)
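
/* Editor's note: the sizeof() dispatch above lets 32-bit rq fields print
 * via %ld and 64-bit ones (e.g. nr_switches, a u64) via %Ld without
 * per-field format strings. */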

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	SEQ_printf(m, "  .%-30s: %lu\n", "load",
		   rq->load.weight);
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
	P(cpu_load[0]);
	P(cpu_load[1]);
	P(cpu_load[2]);
	P(cpu_load[3]);
	P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logaritmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu)
		print_cpu(NULL, cpu);

}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some cpus, including cpu 0, may be missing, so we
 * have to use cpumask_* to iterate over the cpus.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);
	return NULL;
}
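
/*
 * Worked example (editor's note): with cpus {0, 2} online, *offset == 0
 * yields the header cookie (1), *offset == 1 yields cpu 0 (cookie 2), and
 * *offset == 2 skips the offline cpu 1 and yields cpu 2 (cookie 4).
 * sched_debug_show() inverts the mapping via "cpu = (v - 2)".
 */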

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start = sched_debug_start,
	.next = sched_debug_next,
	.stop = sched_debug_stop,
	.show = sched_debug_show,
};

static int sched_debug_release(struct inode *inode, struct file *file)
{
	seq_release(inode, file);

	return 0;
}

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	int ret = 0;

	ret = seq_open(filp, &sched_debug_sops);

	return ret;
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= sched_debug_release,
};

static int __init init_sched_debug_procfs(void)
{
	struct proc_dir_entry *pe;

	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
	if (!pe)
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);

#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tsf, tpf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gsf, gpf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
	mpol_put(pol);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");
#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define P_SCHEDSTAT(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)schedstat_val(p->F))
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
#define PN_SCHEDSTAT(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(p->F)))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
		PN_SCHEDSTAT(se.statistics.wait_start);
		PN_SCHEDSTAT(se.statistics.sleep_start);
		PN_SCHEDSTAT(se.statistics.block_start);
		PN_SCHEDSTAT(se.statistics.sleep_max);
		PN_SCHEDSTAT(se.statistics.block_max);
		PN_SCHEDSTAT(se.statistics.exec_max);
		PN_SCHEDSTAT(se.statistics.slice_max);
		PN_SCHEDSTAT(se.statistics.wait_max);
		PN_SCHEDSTAT(se.statistics.wait_sum);
		P_SCHEDSTAT(se.statistics.wait_count);
		PN_SCHEDSTAT(se.statistics.iowait_sum);
		P_SCHEDSTAT(se.statistics.iowait_count);
		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
		P_SCHEDSTAT(se.statistics.nr_wakeups);
		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}

	__P(nr_switches);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
	P(se.runnable_weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.runnable_load_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_load_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
#endif
	P(policy);
	P(prio);
	if (p->policy == SCHED_DEADLINE) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef PN
#undef __PN
#undef P_SCHEDSTAT
#undef P
#undef __P

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-45s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}

	sched_show_numa(p, m);
}
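
/*
 * Editor's note: proc_sched_show_task() backs reads of /proc/<pid>/sched;
 * a write to that file reaches proc_sched_set_task() below, resetting the
 * task's accumulated schedstats.
 */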

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}