Commit c66f08be, authored by Michael Neuling, committed by Linus Torvalds

Add scaled time to taskstats-based process accounting

This adds fields to the taskstats struct to account for user and system
time scaled by the CPU frequency and instruction issue rate.

It also adds account_(user|system)_time_scaled callbacks, which
architectures can use to account for time using this mechanism.
Signed-off-by: Michael Neuling <mikey@neuling.org>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Jay Lan <jlan@engr.sgi.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 898eb71c
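On architectures without hardware support, the generic update_process_times() hunk below simply passes the same one-jiffy value to both the plain and the scaled accounting functions, so the scaled totals equal the unscaled ones. An architecture that can measure frequency or issue-rate scaling would instead call the new callbacks with a hardware-derived delta. The sketch below is illustrative only and not part of this patch: arch_scaled_cputime_delta() is a hypothetical helper standing in for whatever a given architecture computes from its counters (for example the PURR/SPURR pair on POWER); only account_user_time_scaled() and account_system_time_scaled() are introduced here.

#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/hardirq.h>

/* Hypothetical arch helper: scaled cpu time accumulated since the last tick. */
extern cputime_t arch_scaled_cputime_delta(struct task_struct *p);

/*
 * Illustrative sketch, not part of this patch: an architecture's
 * per-tick accounting hook feeding frequency/issue-rate scaled time
 * into the new callbacks, alongside the normal jiffy-based accounting.
 */
static void arch_account_process_tick(struct task_struct *p, int user_tick)
{
	cputime_t one_tick = jiffies_to_cputime(1);
	cputime_t scaled = arch_scaled_cputime_delta(p);	/* hypothetical */

	if (user_tick) {
		account_user_time(p, one_tick);
		account_user_time_scaled(p, scaled);
	} else {
		account_system_time(p, HARDIRQ_OFFSET, one_tick);
		account_system_time_scaled(p, scaled);
	}
}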
@@ -53,7 +53,9 @@ static inline int kstat_irqs(int irq)
 }
 
 extern void account_user_time(struct task_struct *, cputime_t);
+extern void account_user_time_scaled(struct task_struct *, cputime_t);
 extern void account_system_time(struct task_struct *, int, cputime_t);
+extern void account_system_time_scaled(struct task_struct *, cputime_t);
 extern void account_steal_time(struct task_struct *, cputime_t);
 
 #endif /* _LINUX_KERNEL_STAT_H */
@@ -991,7 +991,7 @@ struct task_struct {
 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
 
 	unsigned int rt_priority;
-	cputime_t utime, stime;
+	cputime_t utime, stime, utimescaled, stimescaled;
 	cputime_t gtime;
 	unsigned long nvcsw, nivcsw; /* context switch counts */
 	struct timespec start_time; 		/* monotonic time */
......
@@ -31,7 +31,7 @@
  */
 
-#define TASKSTATS_VERSION	5
+#define TASKSTATS_VERSION	6
 #define TS_COMM_LEN		32	/* should be >= TASK_COMM_LEN
 					 * in linux/sched.h */
@@ -85,9 +85,12 @@ struct taskstats {
 	 * On some architectures, value will adjust for cpu time stolen
 	 * from the kernel in involuntary waits due to virtualization.
 	 * Value is cumulative, in nanoseconds, without a corresponding count
-	 * and wraps around to zero silently on overflow
+	 * and wraps around to zero silently on overflow. The
+	 * _scaled_ version accounts for cpus which can scale the
+	 * number of instructions executed each cycle.
 	 */
 	__u64	cpu_run_real_total;
+	__u64	cpu_scaled_run_real_total;
 
 	/* cpu "virtual" running time
 	 * Uses time intervals seen by the kernel i.e. no adjustment
@@ -142,6 +145,10 @@ struct taskstats {
 	__u64	write_char;		/* bytes written */
 	__u64	read_syscalls;		/* read syscalls */
 	__u64	write_syscalls;		/* write syscalls */
+	/* time accounting for SMT machines */
+	__u64	ac_utimescaled;		/* utime scaled on frequency etc */
+	__u64	ac_stimescaled;		/* stime scaled on frequency etc */
+
 	/* Extended accounting fields end */
 
 #define TASKSTATS_HAS_IO_ACCOUNTING
......
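One way to read the new delay-accounting field: the ratio of cpu_scaled_run_real_total to cpu_run_real_total gives the effective frequency/issue-rate factor over the task's run time (1.0 on cpus that never scale). A minimal userspace sketch, assuming a struct taskstats sample has already been fetched through the taskstats netlink interface (the retrieval code is omitted here):

#include <linux/taskstats.h>

/*
 * Illustrative only: compare scaled and unscaled run time for one
 * taskstats sample.  Both fields are cumulative nanosecond counters,
 * so a result of 1.0 means the cpu ran at full frequency/issue rate.
 */
static double scaled_run_ratio(const struct taskstats *ts)
{
	if (ts->cpu_run_real_total == 0)
		return 1.0;
	return (double)ts->cpu_scaled_run_real_total /
	       (double)ts->cpu_run_real_total;
}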
@@ -115,6 +115,12 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
 	tmp += timespec_to_ns(&ts);
 	d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;
 
+	tmp = (s64)d->cpu_scaled_run_real_total;
+	cputime_to_timespec(tsk->utimescaled + tsk->stimescaled, &ts);
+	tmp += timespec_to_ns(&ts);
+	d->cpu_scaled_run_real_total =
+		(tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;
+
 	/*
 	 * No locking available for sched_info (and too expensive to add one)
 	 * Mitigate by taking snapshot of values
......
@@ -1059,6 +1059,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->utime = cputime_zero;
 	p->stime = cputime_zero;
 	p->gtime = cputime_zero;
+	p->utimescaled = cputime_zero;
+	p->stimescaled = cputime_zero;
 
 #ifdef CONFIG_TASK_XACCT
 	p->rchar = 0;		/* I/O counter: bytes read */
......
@@ -3333,6 +3333,16 @@ void account_guest_time(struct task_struct *p, cputime_t cputime)
 	cpustat->guest = cputime64_add(cpustat->guest, tmp);
 }
 
+/*
+ * Account scaled user cpu time to a process.
+ * @p: the process that the cpu time gets accounted to
+ * @cputime: the cpu time spent in user space since the last update
+ */
+void account_user_time_scaled(struct task_struct *p, cputime_t cputime)
+{
+	p->utimescaled = cputime_add(p->utimescaled, cputime);
+}
+
 /*
  * Account system cpu time to a process.
  * @p: the process that the cpu time gets accounted to
@@ -3370,6 +3380,17 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
 	acct_update_integrals(p);
 }
 
+/*
+ * Account scaled system cpu time to a process.
+ * @p: the process that the cpu time gets accounted to
+ * @hardirq_offset: the offset to subtract from hardirq_count()
+ * @cputime: the cpu time spent in kernel space since the last update
+ */
+void account_system_time_scaled(struct task_struct *p, cputime_t cputime)
+{
+	p->stimescaled = cputime_add(p->stimescaled, cputime);
+}
+
 /*
  * Account for involuntary wait time.
  * @p: the process from which the cpu time has been stolen
......
@@ -826,10 +826,13 @@ void update_process_times(int user_tick)
 	int cpu = smp_processor_id();
 
 	/* Note: this timer irq context must be accounted for as well. */
-	if (user_tick)
+	if (user_tick) {
 		account_user_time(p, jiffies_to_cputime(1));
-	else
+		account_user_time_scaled(p, jiffies_to_cputime(1));
+	} else {
 		account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
+		account_system_time_scaled(p, jiffies_to_cputime(1));
+	}
 	run_local_timers();
 	if (rcu_pending(cpu))
 		rcu_check_callbacks(cpu, user_tick);
......
@@ -62,6 +62,10 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
 	rcu_read_unlock();
 
 	stats->ac_utime = cputime_to_msecs(tsk->utime) * USEC_PER_MSEC;
 	stats->ac_stime = cputime_to_msecs(tsk->stime) * USEC_PER_MSEC;
+	stats->ac_utimescaled =
+		cputime_to_msecs(tsk->utimescaled) * USEC_PER_MSEC;
+	stats->ac_stimescaled =
+		cputime_to_msecs(tsk->stimescaled) * USEC_PER_MSEC;
 	stats->ac_minflt = tsk->min_flt;
 	stats->ac_majflt = tsk->maj_flt;
......
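As the conversion above shows, ac_utimescaled and ac_stimescaled are exported in the same unit as ac_utime and ac_stime: cputime is converted to milliseconds and then multiplied by USEC_PER_MSEC, i.e. microseconds. A small userspace sketch printing them from an already-fetched sample (only the field names come from this patch; the rest is illustrative):

#include <stdio.h>
#include <linux/taskstats.h>

/*
 * Illustrative only: dump the new scaled accounting fields of one
 * taskstats sample.  Like ac_utime/ac_stime, both are in microseconds.
 */
static void print_scaled_times(const struct taskstats *ts)
{
	printf("scaled utime: %llu us, scaled stime: %llu us\n",
	       (unsigned long long)ts->ac_utimescaled,
	       (unsigned long long)ts->ac_stimescaled);
}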