Commit 52f17b6c authored by Chandra Seetharaman, committed by Linus Torvalds

[PATCH] per-task-delay-accounting: cpu delay collection via schedstats

Make the task-related schedstats functions callable by delay accounting even
if schedstats collection isn't turned on.  This removes the dependency of
delay accounting on schedstats.
Signed-off-by: Chandra Seetharaman <sekharan@us.ibm.com>
Signed-off-by: Shailabh Nagar <nagar@watson.ibm.com>
Signed-off-by: Balbir Singh <balbir@in.ibm.com>
Cc: Jes Sorensen <jes@sgi.com>
Cc: Peter Chubb <peterc@gelato.unsw.edu.au>
Cc: Erich Focht <efocht@ess.nec.de>
Cc: Levent Serinol <lserinol@gmail.com>
Cc: Jay Lan <jlan@engr.sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent 0ff92245
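
The core of the change is the new sched_info_on() helper: sched_info bookkeeping is now compiled in whenever either CONFIG_SCHEDSTATS or CONFIG_TASK_DELAY_ACCT is set, and gated at run time so it only executes when schedstats are built in or delay accounting was actually enabled. Below is a minimal user-space sketch of that compile-time/run-time gating pattern, not the kernel code itself; the delayacct_on flag and the task names are stand-ins for illustration.

/* Sketch of the gating pattern this patch introduces (assumptions: only
 * CONFIG_TASK_DELAY_ACCT is set; delayacct_on stands in for the flag the
 * real kernel sets from its boot parameter). */
#include <stdio.h>

#define CONFIG_TASK_DELAY_ACCT 1    /* pretend only delay accounting is configured */

static int delayacct_on = 1;        /* stand-in for the runtime enable flag */

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;                   /* schedstats always collect */
#elif defined(CONFIG_TASK_DELAY_ACCT)
	return delayacct_on;        /* collect only if delay accounting is enabled */
#else
	return 0;                   /* neither user: skip all bookkeeping */
#endif
}

static void __sched_info_switch(const char *prev, const char *next)
{
	/* the real code updates prev's depart and next's arrive timestamps */
	printf("account switch %s -> %s\n", prev, next);
}

static inline void sched_info_switch(const char *prev, const char *next)
{
	if (sched_info_on())        /* cheap runtime gate on the hot path */
		__sched_info_switch(prev, next);
}

int main(void)
{
	sched_info_switch("taskA", "taskB");
	return 0;
}
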
@@ -537,7 +537,7 @@ extern struct user_struct root_user;
struct backing_dev_info;
struct reclaim_state;
#ifdef CONFIG_SCHEDSTATS
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info {
/* cumulative counters */
unsigned long cpu_time, /* time spent on the cpu */
@@ -548,9 +548,11 @@ struct sched_info {
unsigned long last_arrival, /* when we last ran on a cpu */
last_queued; /* when we were last queued to run */
};
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
#ifdef CONFIG_SCHEDSTATS
extern struct file_operations proc_schedstat_operations;
#endif
#endif /* CONFIG_SCHEDSTATS */
#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
@@ -580,7 +582,19 @@ struct task_delay_info {
u32 swapin_count; /* total count of the number of swapin block */
/* io operations performed */
};
#endif /* CONFIG_TASK_DELAY_ACCT */
static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
extern int delayacct_on;
return delayacct_on;
#else
return 0;
#endif
}
enum idle_type
{
@@ -777,7 +791,7 @@ struct task_struct {
cpumask_t cpus_allowed;
unsigned int time_slice, first_time_slice;
#ifdef CONFIG_SCHEDSTATS
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info sched_info;
#endif
......
@@ -502,9 +502,36 @@ struct file_operations proc_schedstat_operations = {
.release = single_release,
};
/*
* Expects runqueue lock to be held for atomicity of update
*/
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies)
{
if (rq) {
rq->rq_sched_info.run_delay += delta_jiffies;
rq->rq_sched_info.pcnt++;
}
}
/*
* Expects runqueue lock to be held for atomicity of update
*/
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies)
{
if (rq)
rq->rq_sched_info.cpu_time += delta_jiffies;
}
# define schedstat_inc(rq, field) do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt) do { (rq)->field += (amt); } while (0)
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies)
{}
# define schedstat_inc(rq, field) do { } while (0)
# define schedstat_add(rq, field, amt) do { } while (0)
#endif
@@ -524,7 +551,7 @@ static inline struct rq *this_rq_lock(void)
return rq;
}
#ifdef CONFIG_SCHEDSTATS
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
/*
* Called when a process is dequeued from the active array and given
* the cpu. We should note that with the exception of interactive
@@ -552,21 +579,16 @@ static inline void sched_info_dequeued(struct task_struct *t)
*/
static void sched_info_arrive(struct task_struct *t)
{
unsigned long now = jiffies, diff = 0;
struct rq *rq = task_rq(t);
unsigned long now = jiffies, delta_jiffies = 0;
if (t->sched_info.last_queued)
diff = now - t->sched_info.last_queued;
delta_jiffies = now - t->sched_info.last_queued;
sched_info_dequeued(t);
t->sched_info.run_delay += diff;
t->sched_info.run_delay += delta_jiffies;
t->sched_info.last_arrival = now;
t->sched_info.pcnt++;
if (!rq)
return;
rq->rq_sched_info.run_delay += diff;
rq->rq_sched_info.pcnt++;
rq_sched_info_arrive(task_rq(t), delta_jiffies);
}
/*
@@ -586,8 +608,9 @@ static void sched_info_arrive(struct task_struct *t)
*/
static inline void sched_info_queued(struct task_struct *t)
{
if (!t->sched_info.last_queued)
t->sched_info.last_queued = jiffies;
if (unlikely(sched_info_on()))
if (!t->sched_info.last_queued)
t->sched_info.last_queued = jiffies;
}
/*
@@ -596,13 +619,10 @@ static inline void sched_info_queued(struct task_struct *t)
*/
static inline void sched_info_depart(struct task_struct *t)
{
struct rq *rq = task_rq(t);
unsigned long diff = jiffies - t->sched_info.last_arrival;
t->sched_info.cpu_time += diff;
unsigned long delta_jiffies = jiffies - t->sched_info.last_arrival;
if (rq)
rq->rq_sched_info.cpu_time += diff;
t->sched_info.cpu_time += delta_jiffies;
rq_sched_info_depart(task_rq(t), delta_jiffies);
}
/*
@@ -611,7 +631,7 @@ static inline void sched_info_depart(struct task_struct *t)
* the idle task.) We are only called when prev != next.
*/
static inline void
sched_info_switch(struct task_struct *prev, struct task_struct *next)
__sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
struct rq *rq = task_rq(prev);
@@ -626,10 +646,16 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
if (next != rq->idle)
sched_info_arrive(next);
}
static inline void
sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
if (unlikely(sched_info_on()))
__sched_info_switch(prev, next);
}
#else
#define sched_info_queued(t) do { } while (0)
#define sched_info_switch(t, next) do { } while (0)
#endif /* CONFIG_SCHEDSTATS */
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
/*
* Adding/removing a task to/from a priority array:
@@ -1531,8 +1557,9 @@ void fastcall sched_fork(struct task_struct *p, int clone_flags)
INIT_LIST_HEAD(&p->run_list);
p->array = NULL;
#ifdef CONFIG_SCHEDSTATS
memset(&p->sched_info, 0, sizeof(p->sched_info));
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
if (unlikely(sched_info_on()))
memset(&p->sched_info, 0, sizeof(p->sched_info));
#endif
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
p->oncpu = 0;
......
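
With sched_info now available under CONFIG_TASK_DELAY_ACCT, the delay-accounting side can read a task's cumulative run_delay and cpu_time counters. Below is a hedged sketch of how such a consumer might fold them into nanosecond totals; the struct fields mirror the include/linux/sched.h hunk above, while collect_cpu_delays(), jiffies_to_nsecs(), and the HZ value are hypothetical illustrations, not the kernel's delayacct code.

/* Illustrative consumer of the sched_info counters (field names taken from
 * the diff above; everything else is an assumption for the sketch). */
#include <stdio.h>
#include <stdint.h>

struct sched_info {
	unsigned long cpu_time;      /* time spent on the cpu (jiffies) */
	unsigned long run_delay;     /* time spent waiting on a runqueue */
	unsigned long pcnt;          /* # of timeslices run on this cpu */
	unsigned long last_arrival;  /* when we last ran on a cpu */
	unsigned long last_queued;   /* when we were last queued to run */
};

struct cpu_delay_sample {
	uint64_t run_delay_ns;       /* total runqueue wait */
	uint64_t cpu_run_ns;         /* total time actually on cpu */
	unsigned long run_count;     /* number of scheduling periods */
};

static uint64_t jiffies_to_nsecs(unsigned long j)
{
	const uint64_t hz = 250;     /* assumed CONFIG_HZ for the sketch */
	return (uint64_t)j * (1000000000ull / hz);
}

static void collect_cpu_delays(const struct sched_info *si,
			       struct cpu_delay_sample *out)
{
	out->run_delay_ns = jiffies_to_nsecs(si->run_delay);
	out->cpu_run_ns   = jiffies_to_nsecs(si->cpu_time);
	out->run_count    = si->pcnt;
}

int main(void)
{
	struct sched_info si = { .cpu_time = 500, .run_delay = 120, .pcnt = 40 };
	struct cpu_delay_sample s;

	collect_cpu_delays(&si, &s);
	printf("waited %llu ns over %lu runs\n",
	       (unsigned long long)s.run_delay_ns, s.run_count);
	return 0;
}
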