/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/proc_fs.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
#include <linux/mempolicy.h>
#include <linux/debugfs.h>

#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		printk(x);			\
 } while (0)
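
/*
 * Editor's note: m is non-NULL on the /proc/sched_debug seq_file path;
 * sysrq_sched_debug_show() further down passes m == NULL, so the very
 * same output is routed to the console through printk() instead.
 */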

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
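
/*
 * Editor's worked example: for a hypothetical value of 1234567890 nsec,
 * nsec_high() returns 1234 and nsec_low() returns 567890, so
 *
 *	SEQ_printf(m, "%Ld.%06ld", SPLIT_NS(val));
 *
 * prints "1234.567890" -- milliseconds, with the nanosecond remainder as
 * six fractional digits. A negative value keeps its sign in the integer
 * part only.
 */
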
#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef HAVE_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* HAVE_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (strcmp(cmp, sched_feat_names[i]) == 0) {
			if (neg) {
				sysctl_sched_features &= ~(1UL << i);
				sched_feat_disable(i);
			} else {
				sysctl_sched_features |= (1UL << i);
				sched_feat_enable(i);
			}
			break;
		}
	}

	return i;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int i;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	inode_lock(inode);
	i = sched_feat_set(cmp);
	inode_unlock(inode);
	if (i == __SCHED_FEAT_NR)
		return -EINVAL;

	*ppos += cnt;

	return cnt;
}
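
/*
 * Editor's note, example usage: with debugfs mounted at /sys/kernel/debug,
 * reading sched_features lists every feature name, prefixed with "NO_"
 * when the bit is clear, and a feature can be toggled from user space,
 * e.g.:
 *
 *	echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched_features
 *
 * (GENTLE_FAIR_SLEEPERS is assumed to be defined in features.h.)
 * sched_feat_set() strips the "NO_" prefix and clears the matching bit;
 * writing the bare name sets it again.
 */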

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	return 0;
}
late_initcall(sched_init_debug);

#ifdef CONFIG_SMP

#ifdef CONFIG_SYSCTL

static struct ctl_table sd_ctl_dir[] = {
	{
		.procname	= "sched_domain",
		.mode		= 0555,
	},
	{}
};

static struct ctl_table sd_ctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= sd_ctl_dir,
	},
	{}
};

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
	struct ctl_table *entry =
		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

	return entry;
}

static void sd_free_ctl_entry(struct ctl_table **tablep)
{
	struct ctl_table *entry;

	/*
	 * In the intermediate directories, both the child directory and
	 * procname are dynamically allocated and could fail but the mode
	 * will always be set. In the lowest directory the names are
	 * static strings and all have proc handlers.
	 */
	for (entry = *tablep; entry->mode; entry++) {
		if (entry->child)
			sd_free_ctl_entry(&entry->child);
		if (entry->proc_handler == NULL)
			kfree(entry->procname);
	}

	kfree(*tablep);
	*tablep = NULL;
}

static int min_load_idx = 0;
static int max_load_idx = CPU_LOAD_IDX_MAX-1;

static void
set_table_entry(struct ctl_table *entry,
		const char *procname, void *data, int maxlen,
		umode_t mode, proc_handler *proc_handler,
		bool load_idx)
{
	entry->procname = procname;
	entry->data = data;
	entry->maxlen = maxlen;
	entry->mode = mode;
	entry->proc_handler = proc_handler;

	if (load_idx) {
		entry->extra1 = &min_load_idx;
		entry->extra2 = &max_load_idx;
	}
}

static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
	struct ctl_table *table = sd_alloc_ctl_entry(14);

	if (table == NULL)
		return NULL;

	set_table_entry(&table[0], "min_interval", &sd->min_interval,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[1], "max_interval", &sd->max_interval,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[9], "cache_nice_tries",
		&sd->cache_nice_tries,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[10], "flags", &sd->flags,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[11], "max_newidle_lb_cost",
		&sd->max_newidle_lb_cost,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[12], "name", sd->name,
		CORENAME_MAX_SIZE, 0444, proc_dostring, false);
	/* &table[13] is terminator */

	return table;
}

static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
	struct ctl_table *entry, *table;
	struct sched_domain *sd;
	int domain_num = 0, i;
	char buf[32];

	for_each_domain(cpu, sd)
		domain_num++;
	entry = table = sd_alloc_ctl_entry(domain_num + 1);
	if (table == NULL)
		return NULL;

	i = 0;
	for_each_domain(cpu, sd) {
		snprintf(buf, 32, "domain%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_domain_table(sd);
		entry++;
		i++;
	}
	return table;
}

static struct ctl_table_header *sd_sysctl_header;
void register_sched_domain_sysctl(void)
{
	int i, cpu_num = num_possible_cpus();
	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
	char buf[32];

	WARN_ON(sd_ctl_dir[0].child);
	sd_ctl_dir[0].child = entry;

	if (entry == NULL)
		return;

	for_each_possible_cpu(i) {
		snprintf(buf, 32, "cpu%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_cpu_table(i);
		entry++;
	}

	WARN_ON(sd_sysctl_header);
	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}
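
/*
 * Editor's sketch of the resulting hierarchy, assuming two domain levels
 * on cpu0: register_sched_domain_sysctl() above builds
 *
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/flags
 *	/proc/sys/kernel/sched_domain/cpu0/domain1/...
 *
 * i.e. one "cpuN" directory per possible CPU and one "domainN" directory
 * per sched_domain level, each filled in by sd_alloc_ctl_domain_table().
 */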

/* may be called multiple times per register */
void unregister_sched_domain_sysctl(void)
{
	unregister_sysctl_table(sd_sysctl_header);
	sd_sysctl_header = NULL;
	if (sd_ctl_dir[0].child)
		sd_free_ctl_entry(&sd_ctl_dir[0].child);
}
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define P_SCHEDSTAT(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)schedstat_val(F))
#define PN(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);
	if (schedstat_enabled()) {
		PN_SCHEDSTAT(se->statistics.wait_start);
		PN_SCHEDSTAT(se->statistics.sleep_start);
		PN_SCHEDSTAT(se->statistics.block_start);
		PN_SCHEDSTAT(se->statistics.sleep_max);
		PN_SCHEDSTAT(se->statistics.block_max);
		PN_SCHEDSTAT(se->statistics.exec_max);
		PN_SCHEDSTAT(se->statistics.slice_max);
		PN_SCHEDSTAT(se->statistics.wait_max);
		PN_SCHEDSTAT(se->statistics.wait_sum);
		P_SCHEDSTAT(se->statistics.wait_count);
	}
	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
	return group_path;
}
#endif

static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	unsigned long state;

	if (rq->curr == p) {
		SEQ_printf(m, ">R");
	} else {
		state = p->state ? __ffs(p->state) + 1 : 0;
		SEQ_printf(m, " %c", state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
	}

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	" S           task   PID         tree-key  switches  prio"
	"     wait-time             sum-exec        sum-sleep\n"
	"-------------------------------------------------------"
	"----------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cfs_rq->rb_leftmost)
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
			cfs_rq->runnable_load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed_load_avg",
			atomic_long_read(&cfs_rq->removed_load_avg));
	SEQ_printf(m, "  .%-30s: %ld\n", "removed_util_avg",
			atomic_long_read(&cfs_rq->removed_util_avg));
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\ndl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

extern __read_mostly int sched_clock_running;

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	SEQ_printf(m, "  .%-30s: %lu\n", "load",
		   rq->load.weight);
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
	P(cpu_load[0]);
	P(cpu_load[1]);
	P(cpu_load[2]);
	P(cpu_load[3]);
	P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu)
		print_cpu(NULL, cpu);

}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some cpus, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the cpus.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);
	return NULL;
}
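
/*
 * Editor's worked example: on a hypothetical box where cpu0 is offline
 * and cpu1 is the first online CPU, *offset == 0 yields the header token
 * (void *)1, and *offset == 1 maps through cpumask_first() to cpu1,
 * returning (void *)3 -- which sched_debug_show() above decodes back to
 * cpu 1 via (unsigned long)(v - 2).
 */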

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start = sched_debug_start,
	.next = sched_debug_next,
	.stop = sched_debug_stop,
	.show = sched_debug_show,
};

static int sched_debug_release(struct inode *inode, struct file *file)
{
	seq_release(inode, file);

	return 0;
}

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	int ret = 0;

	ret = seq_open(filp, &sched_debug_sops);

	return ret;
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= sched_debug_release,
};

static int __init init_sched_debug_procfs(void)
{
	struct proc_dir_entry *pe;

	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
	if (!pe)
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);

#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tsf, tpf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gsf, gpf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
	mpol_put(pol);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");
#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define P_SCHEDSTAT(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)schedstat_val(p->F))
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
#define PN_SCHEDSTAT(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(p->F)))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
		PN_SCHEDSTAT(se.statistics.wait_start);
		PN_SCHEDSTAT(se.statistics.sleep_start);
		PN_SCHEDSTAT(se.statistics.block_start);
		PN_SCHEDSTAT(se.statistics.sleep_max);
		PN_SCHEDSTAT(se.statistics.block_max);
		PN_SCHEDSTAT(se.statistics.exec_max);
		PN_SCHEDSTAT(se.statistics.slice_max);
		PN_SCHEDSTAT(se.statistics.wait_max);
		PN_SCHEDSTAT(se.statistics.wait_sum);
		P_SCHEDSTAT(se.statistics.wait_count);
		PN_SCHEDSTAT(se.statistics.iowait_sum);
		P_SCHEDSTAT(se.statistics.iowait_count);
		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
		P_SCHEDSTAT(se.statistics.nr_wakeups);
		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}
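
	/*
	 * Editor's note (sketch): avg_atom above is the mean CPU time per
	 * scheduling atom, i.e. between context switches. A hypothetical
	 * task with sum_exec_runtime = 10000000 nsec and nr_switches = 100
	 * gets avg_atom = 100000 nsec, printed by __PN() as "0.100000";
	 * avg_per_cpu divides by nr_migrations instead, giving the average
	 * runtime accumulated per stay on one CPU.
	 */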

	__P(nr_switches);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
#endif
	P(policy);
	P(prio);
	if (p->policy == SCHED_DEADLINE) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef PN
#undef __PN
#undef P_SCHEDSTAT
#undef P
#undef __P

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-45s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}