/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>

#include "sched.h"

/* Serializes debug output so per-cpu dumps don't interleave. */
static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console: a NULL seq_file routes the output to printk().
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (!(m))				\
		printk(x);			\
	else					\
		seq_printf(m, x);		\
 } while (0)

/*
 * Ease the printing of nsec fields: whole-millisecond part of a
 * nanosecond count, preserving the sign.
 */
static long long nsec_high(unsigned long long nsec)
{
	int neg = (long long)nsec < 0;

	/* do_div() needs a non-negative dividend. */
	if (neg)
		nsec = -nsec;
	do_div(nsec, 1000000);

	return neg ? -(long long)nsec : (long long)nsec;
}

/* Sub-millisecond remainder (0..999999 ns) of a nanosecond count. */
static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;	/* do_div() needs a non-negative value */

	return do_div(nsec, 1000000);
}

/* Expands to the argument pair expected by a "%Ld.%06ld" format. */
#define SPLIT_NS(x) nsec_high(x), nsec_low(x)

#ifdef CONFIG_FAIR_GROUP_SCHED
/* Dump the scheduling stats of @tg's sched_entity on @cpu. */
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define PN(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))

	if (!se) {
		/* Root group has no entity: show the rq-wide averages. */
		struct sched_avg *avg = &cpu_rq(cpu)->avg;

		P(avg->runnable_avg_sum);
		P(avg->runnable_avg_period);
		return;
	}

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
	PN(se->statistics.wait_start);
	PN(se->statistics.sleep_start);
	PN(se->statistics.block_start);
	PN(se->statistics.sleep_max);
	PN(se->statistics.block_max);
	PN(se->statistics.exec_max);
	PN(se->statistics.slice_max);
	PN(se->statistics.wait_max);
	PN(se->statistics.wait_sum);
	P(se->statistics.wait_count);
#endif
	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.runnable_avg_sum);
	P(se->avg.runnable_avg_period);
	P(se->avg.load_avg_contrib);
	P(se->avg.decay_count);
#endif
#undef PN
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
/*
 * Shared scratch buffer for group names.
 * NOTE(review): not locked here — presumably serialized by the callers
 * (print_cpu() holds sched_debug_lock); confirm before adding new callers.
 */
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
	/* Autogroups format their own name and have no cgroup path. */
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
	return group_path;
}
#endif

I
Ingo Molnar 已提交
118
static void
119
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
I
Ingo Molnar 已提交
120 121 122 123 124 125
{
	if (rq->curr == p)
		SEQ_printf(m, "R");
	else
		SEQ_printf(m, " ");

I
Ingo Molnar 已提交
126
	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
I
Ingo Molnar 已提交
127
		p->comm, p->pid,
I
Ingo Molnar 已提交
128
		SPLIT_NS(p->se.vruntime),
I
Ingo Molnar 已提交
129
		(long long)(p->nvcsw + p->nivcsw),
130
		p->prio);
I
Ingo Molnar 已提交
131
#ifdef CONFIG_SCHEDSTATS
132
	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
I
Ingo Molnar 已提交
133 134
		SPLIT_NS(p->se.vruntime),
		SPLIT_NS(p->se.sum_exec_runtime),
135
		SPLIT_NS(p->se.statistics.sum_sleep_runtime));
I
Ingo Molnar 已提交
136
#else
137
	SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
I
Ingo Molnar 已提交
138
		0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
I
Ingo Molnar 已提交
139
#endif
140 141 142
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif
143 144

	SEQ_printf(m, "\n");
I
Ingo Molnar 已提交
145 146
}

147
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
I
Ingo Molnar 已提交
148 149
{
	struct task_struct *g, *p;
P
Peter Zijlstra 已提交
150
	unsigned long flags;
I
Ingo Molnar 已提交
151 152 153

	SEQ_printf(m,
	"\nrunnable tasks:\n"
154 155
	"            task   PID         tree-key  switches  prio"
	"     exec-runtime         sum-exec        sum-sleep\n"
156
	"------------------------------------------------------"
157
	"----------------------------------------------------\n");
I
Ingo Molnar 已提交
158

P
Peter Zijlstra 已提交
159
	read_lock_irqsave(&tasklist_lock, flags);
I
Ingo Molnar 已提交
160 161

	do_each_thread(g, p) {
P
Peter Zijlstra 已提交
162
		if (!p->on_rq || task_cpu(p) != rq_cpu)
I
Ingo Molnar 已提交
163 164
			continue;

165
		print_task(m, rq, p);
I
Ingo Molnar 已提交
166 167
	} while_each_thread(g, p);

P
Peter Zijlstra 已提交
168
	read_unlock_irqrestore(&tasklist_lock, flags);
I
Ingo Molnar 已提交
169 170
}

171
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
I
Ingo Molnar 已提交
172
{
I
Ingo Molnar 已提交
173 174
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
175
	struct rq *rq = cpu_rq(cpu);
I
Ingo Molnar 已提交
176 177 178
	struct sched_entity *last;
	unsigned long flags;

179 180 181
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
182
	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
183
#endif
I
Ingo Molnar 已提交
184 185
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));
I
Ingo Molnar 已提交
186

187
	raw_spin_lock_irqsave(&rq->lock, flags);
I
Ingo Molnar 已提交
188
	if (cfs_rq->rb_leftmost)
189
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
I
Ingo Molnar 已提交
190 191 192
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
P
Peter Zijlstra 已提交
193
	min_vruntime = cfs_rq->min_vruntime;
194
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
195
	raw_spin_unlock_irqrestore(&rq->lock, flags);
I
Ingo Molnar 已提交
196 197 198 199 200 201
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
I
Ingo Molnar 已提交
202
	spread = max_vruntime - MIN_vruntime;
I
Ingo Molnar 已提交
203 204
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
I
Ingo Molnar 已提交
205
	spread0 = min_vruntime - rq0_min_vruntime;
I
Ingo Molnar 已提交
206 207
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
P
Peter Zijlstra 已提交
208
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
P
Peter Zijlstra 已提交
209
			cfs_rq->nr_spread_over);
210
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
P
Peter Zijlstra 已提交
211
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
212 213
#ifdef CONFIG_FAIR_GROUP_SCHED
#ifdef CONFIG_SMP
214
	SEQ_printf(m, "  .%-30s: %ld\n", "runnable_load_avg",
215
			cfs_rq->runnable_load_avg);
216
	SEQ_printf(m, "  .%-30s: %ld\n", "blocked_load_avg",
217
			cfs_rq->blocked_load_avg);
218 219
	SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_avg",
			(unsigned long long)atomic64_read(&cfs_rq->tg->load_avg));
220 221
	SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_contrib",
			cfs_rq->tg_load_contrib);
222 223 224 225
	SEQ_printf(m, "  .%-30s: %d\n", "tg_runnable_contrib",
			cfs_rq->tg_runnable_contrib);
	SEQ_printf(m, "  .%-30s: %d\n", "tg->runnable_avg",
			atomic_read(&cfs_rq->tg->runnable_avg));
226
#endif
P
Peter Zijlstra 已提交
227

228
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
229
#endif
I
Ingo Molnar 已提交
230 231
}

232 233
void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
234 235 236
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
237
	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
238
#endif
239 240 241 242 243 244 245 246 247 248 249 250 251 252 253

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	P(rt_nr_running);
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef P
}

254 255
extern __read_mostly int sched_clock_running;

256
static void print_cpu(struct seq_file *m, int cpu)
I
Ingo Molnar 已提交
257
{
258
	struct rq *rq = cpu_rq(cpu);
259
	unsigned long flags;
I
Ingo Molnar 已提交
260 261 262 263 264

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

265
		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
I
Ingo Molnar 已提交
266 267 268
			   cpu, freq / 1000, (freq % 1000));
	}
#else
269
	SEQ_printf(m, "cpu#%d\n", cpu);
I
Ingo Molnar 已提交
270 271
#endif

272 273 274 275 276 277 278 279
#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

I
Ingo Molnar 已提交
280 281
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
I
Ingo Molnar 已提交
282 283 284

	P(nr_running);
	SEQ_printf(m, "  .%-30s: %lu\n", "load",
285
		   rq->load.weight);
I
Ingo Molnar 已提交
286 287 288
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
I
Ingo Molnar 已提交
289
	PN(next_balance);
I
Ingo Molnar 已提交
290
	P(curr->pid);
I
Ingo Molnar 已提交
291
	PN(clock);
I
Ingo Molnar 已提交
292 293 294 295 296 297
	P(cpu_load[0]);
	P(cpu_load[1]);
	P(cpu_load[2]);
	P(cpu_load[3]);
	P(cpu_load[4]);
#undef P
I
Ingo Molnar 已提交
298
#undef PN
I
Ingo Molnar 已提交
299

P
Peter Zijlstra 已提交
300 301
#ifdef CONFIG_SCHEDSTATS
#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
M
Mike Galbraith 已提交
302
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
P
Peter Zijlstra 已提交
303 304 305 306 307

	P(yld_count);

	P(sched_count);
	P(sched_goidle);
M
Mike Galbraith 已提交
308 309 310
#ifdef CONFIG_SMP
	P64(avg_idle);
#endif
P
Peter Zijlstra 已提交
311 312 313 314 315

	P(ttwu_count);
	P(ttwu_local);

#undef P
316
#undef P64
P
Peter Zijlstra 已提交
317
#endif
318
	spin_lock_irqsave(&sched_debug_lock, flags);
319
	print_cfs_stats(m, cpu);
320
	print_rt_stats(m, cpu);
I
Ingo Molnar 已提交
321

322
	rcu_read_lock();
323
	print_rq(m, rq, cpu);
324 325
	rcu_read_unlock();
	spin_unlock_irqrestore(&sched_debug_lock, flags);
326
	SEQ_printf(m, "\n");
I
Ingo Molnar 已提交
327 328
}

/*
 * Human-readable names for sysctl_sched_tunable_scaling, indexed by the
 * SCHED_TUNABLESCALING_* values. Fixed the user-visible misspelling
 * "logaritmic" -> "logarithmic".
 */
static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

335
static void sched_debug_header(struct seq_file *m)
I
Ingo Molnar 已提交
336
{
337 338
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;
I
Ingo Molnar 已提交
339

340 341 342 343 344 345 346
	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.10, %s %.*s\n",
I
Ingo Molnar 已提交
347 348 349 350
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366
#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable);
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");
I
Ingo Molnar 已提交
367

I
Ingo Molnar 已提交
368
#define P(x) \
369
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
I
Ingo Molnar 已提交
370
#define PN(x) \
371
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
I
Ingo Molnar 已提交
372
	PN(sysctl_sched_latency);
373
	PN(sysctl_sched_min_granularity);
I
Ingo Molnar 已提交
374
	PN(sysctl_sched_wakeup_granularity);
375
	P(sysctl_sched_child_runs_first);
I
Ingo Molnar 已提交
376 377 378 379
	P(sysctl_sched_features);
#undef PN
#undef P

380 381
	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
382 383
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
384 385
	SEQ_printf(m, "\n");
}
/* seq_file ->show: cookie 1 is the header, cookie n+2 is cpu n. */
static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu == -1)
		sched_debug_header(m);
	else
		print_cpu(m, cpu);

	return 0;
}

399
void sysrq_sched_debug_show(void)
I
Ingo Molnar 已提交
400
{
401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu)
		print_cpu(NULL, cpu);

}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some cpus, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the cpus.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	/* Position 0 is the header pseudo-entry. */
	if (n == 0)
		return (void *) 1;

	n--;

	/* Map the remaining positions onto online cpus, skipping holes. */
	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);
	return NULL;
}

/* seq_file ->next: advance the offset and re-resolve the position. */
static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	++*offset;
	return sched_debug_start(file, offset);
}

/* seq_file ->stop: nothing to release. */
static void sched_debug_stop(struct seq_file *file, void *data)
{
}

/* seq_file iteration callbacks for /proc/sched_debug. */
static const struct seq_operations sched_debug_sops = {
	.start	= sched_debug_start,
	.next	= sched_debug_next,
	.stop	= sched_debug_stop,
	.show	= sched_debug_show,
};

/* ->release for /proc/sched_debug: tear down the seq_file state. */
static int sched_debug_release(struct inode *inode, struct file *file)
{
	seq_release(inode, file);
	return 0;
}

/* ->open for /proc/sched_debug: attach the seq_file iterator. */
static int sched_debug_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &sched_debug_sops);
}

470
static const struct file_operations sched_debug_fops = {
I
Ingo Molnar 已提交
471 472 473
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
474
	.release	= sched_debug_release,
I
Ingo Molnar 已提交
475 476 477 478 479 480
};

/* Register /proc/sched_debug (read-only for everyone). */
static int __init init_sched_debug_procfs(void)
{
	struct proc_dir_entry *pe;

	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
	if (!pe)
		return -ENOMEM;

	return 0;
}

__initcall(init_sched_debug_procfs);

void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
491
	unsigned long nr_switches;
I
Ingo Molnar 已提交
492

493 494
	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid,
						get_nr_threads(p));
495 496
	SEQ_printf(m,
		"---------------------------------------------------------\n");
497 498
#define __P(F) \
	SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)F)
I
Ingo Molnar 已提交
499
#define P(F) \
500
	SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)p->F)
501 502
#define __PN(F) \
	SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
I
Ingo Molnar 已提交
503
#define PN(F) \
504
	SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
I
Ingo Molnar 已提交
505

I
Ingo Molnar 已提交
506 507 508
	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);
I
Ingo Molnar 已提交
509

510 511
	nr_switches = p->nvcsw + p->nivcsw;

I
Ingo Molnar 已提交
512
#ifdef CONFIG_SCHEDSTATS
513 514 515 516 517 518 519 520 521 522 523 524
	PN(se.statistics.wait_start);
	PN(se.statistics.sleep_start);
	PN(se.statistics.block_start);
	PN(se.statistics.sleep_max);
	PN(se.statistics.block_max);
	PN(se.statistics.exec_max);
	PN(se.statistics.slice_max);
	PN(se.statistics.wait_max);
	PN(se.statistics.wait_sum);
	P(se.statistics.wait_count);
	PN(se.statistics.iowait_sum);
	P(se.statistics.iowait_count);
525
	P(se.nr_migrations);
526 527 528 529 530 531 532 533 534 535 536 537 538 539
	P(se.statistics.nr_migrations_cold);
	P(se.statistics.nr_failed_migrations_affine);
	P(se.statistics.nr_failed_migrations_running);
	P(se.statistics.nr_failed_migrations_hot);
	P(se.statistics.nr_forced_migrations);
	P(se.statistics.nr_wakeups);
	P(se.statistics.nr_wakeups_sync);
	P(se.statistics.nr_wakeups_migrate);
	P(se.statistics.nr_wakeups_local);
	P(se.statistics.nr_wakeups_remote);
	P(se.statistics.nr_wakeups_affine);
	P(se.statistics.nr_wakeups_affine_attempts);
	P(se.statistics.nr_wakeups_passive);
	P(se.statistics.nr_wakeups_idle);
540 541 542 543 544 545 546 547 548 549 550

	{
		u64 avg_atom, avg_per_cpu;

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			do_div(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
551
		if (p->se.nr_migrations) {
R
Roman Zippel 已提交
552 553
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
554
		} else {
555
			avg_per_cpu = -1LL;
556
		}
557 558 559 560

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}
I
Ingo Molnar 已提交
561
#endif
562
	__P(nr_switches);
563
	SEQ_printf(m, "%-35s:%21Ld\n",
564 565 566 567
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-35s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

I
Ingo Molnar 已提交
568 569 570
	P(se.load.weight);
	P(policy);
	P(prio);
I
Ingo Molnar 已提交
571
#undef PN
572 573 574
#undef __PN
#undef P
#undef __P
I
Ingo Molnar 已提交
575 576

	{
577
		unsigned int this_cpu = raw_smp_processor_id();
I
Ingo Molnar 已提交
578 579
		u64 t0, t1;

580 581
		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
582
		SEQ_printf(m, "%-35s:%21Ld\n",
I
Ingo Molnar 已提交
583 584 585 586 587 588
			   "clock-delta", (long long)(t1-t0));
	}
}

/* Reset @p's schedstats (triggered by writing to /proc/<pid>/sched). */
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}