/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/proc_fs.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
#include <linux/mempolicy.h>
#include <linux/debugfs.h>

#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		printk(x);			\
 } while (0)

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

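/*
 * SPLIT_NS() splits a nanosecond value into a signed millisecond part and
 * a six-digit nanosecond remainder, matching the "%Ld.%06ld" formats below.
 */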
#define SPLIT_NS(x) nsec_high(x), nsec_low(x)

#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef HAVE_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* HAVE_JUMP_LABEL */

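/*
 * Set or clear a single scheduler feature by name; a "NO_" prefix clears
 * it. Returns the matched index, or __SCHED_FEAT_NR if no name matched.
 */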
static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (strcmp(cmp, sched_feat_names[i]) == 0) {
			if (neg) {
				sysctl_sched_features &= ~(1UL << i);
				sched_feat_disable(i);
			} else {
				sysctl_sched_features |= (1UL << i);
				sched_feat_enable(i);
			}
			break;
		}
	}

	return i;
}

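/*
 * Handle a write to the sched_features debugfs file: parse a single
 * "<feature>" or "NO_<feature>" token and apply it under the inode lock.
 */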
static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int i;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	inode_lock(inode);
	i = sched_feat_set(cmp);
	inode_unlock(inode);
	if (i == __SCHED_FEAT_NR)
		return -EINVAL;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	return 0;
}
late_initcall(sched_init_debug);

#ifdef CONFIG_SMP

#ifdef CONFIG_SYSCTL

static struct ctl_table sd_ctl_dir[] = {
	{
		.procname	= "sched_domain",
		.mode		= 0555,
	},
	{}
};

static struct ctl_table sd_ctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= sd_ctl_dir,
	},
	{}
};

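/* kcalloc() zeroes the entries, so the final entry doubles as the table terminator. */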
static struct ctl_table *sd_alloc_ctl_entry(int n)
{
	struct ctl_table *entry =
		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

	return entry;
}

static void sd_free_ctl_entry(struct ctl_table **tablep)
{
	struct ctl_table *entry;

	/*
	 * In the intermediate directories, both the child directory and
	 * procname are dynamically allocated and could fail but the mode
	 * will always be set. In the lowest directory the names are
	 * static strings and all have proc handlers.
	 */
	for (entry = *tablep; entry->mode; entry++) {
		if (entry->child)
			sd_free_ctl_entry(&entry->child);
		if (entry->proc_handler == NULL)
			kfree(entry->procname);
	}

	kfree(*tablep);
	*tablep = NULL;
}

static int min_load_idx = 0;
static int max_load_idx = CPU_LOAD_IDX_MAX-1;

static void
set_table_entry(struct ctl_table *entry,
		const char *procname, void *data, int maxlen,
		umode_t mode, proc_handler *proc_handler,
		bool load_idx)
{
	entry->procname = procname;
	entry->data = data;
	entry->maxlen = maxlen;
	entry->mode = mode;
	entry->proc_handler = proc_handler;

	if (load_idx) {
		entry->extra1 = &min_load_idx;
		entry->extra2 = &max_load_idx;
	}
}

static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
	struct ctl_table *table = sd_alloc_ctl_entry(14);

	if (table == NULL)
		return NULL;

	set_table_entry(&table[0], "min_interval", &sd->min_interval,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[1], "max_interval", &sd->max_interval,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[9], "cache_nice_tries",
		&sd->cache_nice_tries,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[10], "flags", &sd->flags,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[11], "max_newidle_lb_cost",
		&sd->max_newidle_lb_cost,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[12], "name", sd->name,
		CORENAME_MAX_SIZE, 0444, proc_dostring, false);
	/* &table[13] is terminator */

	return table;
}

static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
	struct ctl_table *entry, *table;
	struct sched_domain *sd;
	int domain_num = 0, i;
	char buf[32];

	for_each_domain(cpu, sd)
		domain_num++;
	entry = table = sd_alloc_ctl_entry(domain_num + 1);
	if (table == NULL)
		return NULL;

	i = 0;
	for_each_domain(cpu, sd) {
		snprintf(buf, 32, "domain%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_domain_table(sd);
		entry++;
		i++;
	}
	return table;
}

static struct ctl_table_header *sd_sysctl_header;
void register_sched_domain_sysctl(void)
{
	int i, cpu_num = num_possible_cpus();
	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
	char buf[32];

	WARN_ON(sd_ctl_dir[0].child);
	sd_ctl_dir[0].child = entry;

	if (entry == NULL)
		return;

	for_each_possible_cpu(i) {
		snprintf(buf, 32, "cpu%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_cpu_table(i);
		entry++;
	}

	WARN_ON(sd_sysctl_header);
	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}

/* may be called multiple times per register */
void unregister_sched_domain_sysctl(void)
{
	unregister_sysctl_table(sd_sysctl_header);
	sd_sysctl_header = NULL;
	if (sd_ctl_dir[0].child)
		sd_free_ctl_entry(&sd_ctl_dir[0].child);
}
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define P_SCHEDSTAT(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)schedstat_val(F))
#define PN(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);
	if (schedstat_enabled()) {
		PN_SCHEDSTAT(se->statistics.wait_start);
		PN_SCHEDSTAT(se->statistics.sleep_start);
		PN_SCHEDSTAT(se->statistics.block_start);
		PN_SCHEDSTAT(se->statistics.sleep_max);
		PN_SCHEDSTAT(se->statistics.block_max);
		PN_SCHEDSTAT(se->statistics.exec_max);
		PN_SCHEDSTAT(se->statistics.slice_max);
		PN_SCHEDSTAT(se->statistics.wait_max);
		PN_SCHEDSTAT(se->statistics.wait_sum);
		P_SCHEDSTAT(se->statistics.wait_count);
	}
	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

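/*
 * task_group_path() formats into this single static buffer; callers are
 * serialized by sched_debug_lock, taken in print_cpu().
 */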
#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
	return group_path;
}
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, "R");
	else
		SEQ_printf(m, " ");

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	"            task   PID         tree-key  switches  prio"
	"     wait-time             sum-exec        sum-sleep\n"
	"------------------------------------------------------"
	"----------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

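/*
 * Dump one cfs_rq: vruntime spread (also relative to rq0), load-tracking
 * averages, and group/bandwidth state where configured.
 */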
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cfs_rq->rb_leftmost)
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
			cfs_rq->runnable_load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed_load_avg",
			atomic_long_read(&cfs_rq->removed_load_avg));
	SEQ_printf(m, "  .%-30s: %ld\n", "removed_util_avg",
			atomic_long_read(&cfs_rq->removed_util_avg));
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\ndl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

extern __read_mostly int sched_clock_running;

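/*
 * Print one CPU's runqueue counters, then its cfs/rt/dl runqueues and
 * runnable tasks under sched_debug_lock.
 */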
static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	SEQ_printf(m, "  .%-30s: %lu\n", "load",
		   rq->load.weight);
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
	P(cpu_load[0]);
	P(cpu_load[1]);
	P(cpu_load[2]);
	P(cpu_load[3]);
	P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

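/* The iterator cookie is 1 for the header and n + 2 for CPU n (see below). */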
static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

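/* Dump everything to the console: SEQ_printf() falls back to printk() when the seq_file is NULL. */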
void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu)
		print_cpu(NULL, cpu);

}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some cpus, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the cpus.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);
	return NULL;
}

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start = sched_debug_start,
	.next = sched_debug_next,
	.stop = sched_debug_stop,
	.show = sched_debug_show,
};

static int sched_debug_release(struct inode *inode, struct file *file)
{
	seq_release(inode, file);

	return 0;
}

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	int ret = 0;

	ret = seq_open(filp, &sched_debug_sops);

	return ret;
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= sched_debug_release,
};

static int __init init_sched_debug_procfs(void)
{
	struct proc_dir_entry *pe;

	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
	if (!pe)
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);

#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tsf, tpf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gsf, gpf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
	mpol_put(pol);
#endif
}

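/* Back-end of /proc/<pid>/sched: dump one task's scheduling parameters and statistics. */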
void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");
#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define P_SCHEDSTAT(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)schedstat_val(p->F))
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
#define PN_SCHEDSTAT(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(p->F)))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
		PN_SCHEDSTAT(se.statistics.wait_start);
		PN_SCHEDSTAT(se.statistics.sleep_start);
		PN_SCHEDSTAT(se.statistics.block_start);
		PN_SCHEDSTAT(se.statistics.sleep_max);
		PN_SCHEDSTAT(se.statistics.block_max);
		PN_SCHEDSTAT(se.statistics.exec_max);
		PN_SCHEDSTAT(se.statistics.slice_max);
		PN_SCHEDSTAT(se.statistics.wait_max);
		PN_SCHEDSTAT(se.statistics.wait_sum);
		P_SCHEDSTAT(se.statistics.wait_count);
		PN_SCHEDSTAT(se.statistics.iowait_sum);
		P_SCHEDSTAT(se.statistics.iowait_count);
		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
		P_SCHEDSTAT(se.statistics.nr_wakeups);
		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}

	__P(nr_switches);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
#endif
	P(policy);
	P(prio);
	if (p->policy == SCHED_DEADLINE) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef PN
#undef __PN
#undef P_SCHEDSTAT
#undef P
#undef __P

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-45s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}