提交 ad6a0d7a 编写于 作者: X Xie XiuQi

sched/cputime: account irq cputime accurately

hulk inclusion
category: bugfix
bugzilla: 13257
CVE: NA

If there are a lot of interrupts, the CPU usage fluctuation
will be more serious. Because CONFIG_IRQ_TIME_ACCOUNTING is not
enabled, the interrupt time is not counted separately. If an
interrupt occurs during the idle period, the idle runtime
includes the irq time. When an irq occurs while another task is
running, the runtime of that task also includes the irq time.

Therefore, when interrupts arrive irregularly, the CPU
occupancy statistics may fluctuate greatly.

In this patch, use CONFIG_IRQ_TIME_ACCOUNTING to count the
irq time; the irq time is subtracted from the idle time, so
the CPU usage can be counted relatively accurately.
Signed-off-by: Xie XiuQi <xiexiuqi@huawei.com>
Reviewed-by: Cheng Jian <cj.chengjian@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
上级 21440a9c
...@@ -28,6 +28,8 @@ enum cpu_usage_stat { ...@@ -28,6 +28,8 @@ enum cpu_usage_stat {
CPUTIME_STEAL, CPUTIME_STEAL,
CPUTIME_GUEST, CPUTIME_GUEST,
CPUTIME_GUEST_NICE, CPUTIME_GUEST_NICE,
CPUTIME_SOFTIRQ_IDLE,
CPUTIME_IRQ_IDLE,
NR_STATS, NR_STATS,
}; };
......
...@@ -51,6 +51,7 @@ void irqtime_account_irq(struct task_struct *curr) ...@@ -51,6 +51,7 @@ void irqtime_account_irq(struct task_struct *curr)
struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime); struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
s64 delta; s64 delta;
int cpu; int cpu;
struct rq *rq = this_rq();
if (!sched_clock_irqtime) if (!sched_clock_irqtime)
return; return;
...@@ -65,10 +66,15 @@ void irqtime_account_irq(struct task_struct *curr) ...@@ -65,10 +66,15 @@ void irqtime_account_irq(struct task_struct *curr)
* in that case, so as not to confuse scheduler with a special task * in that case, so as not to confuse scheduler with a special task
* that do not consume any time, but still wants to run. * that do not consume any time, but still wants to run.
*/ */
if (hardirq_count()) if (hardirq_count()) {
irqtime_account_delta(irqtime, delta, CPUTIME_IRQ); irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) if (curr == rq->idle)
kcpustat_this_cpu->cpustat[CPUTIME_IRQ_IDLE] += delta;
} else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) {
irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ); irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
if (curr == rq->idle)
kcpustat_this_cpu->cpustat[CPUTIME_SOFTIRQ_IDLE] += delta;
}
} }
EXPORT_SYMBOL_GPL(irqtime_account_irq); EXPORT_SYMBOL_GPL(irqtime_account_irq);
...@@ -583,11 +589,18 @@ int sched_idle_time_adjust(int cpu, u64 *utime, u64 *stime) ...@@ -583,11 +589,18 @@ int sched_idle_time_adjust(int cpu, u64 *utime, u64 *stime)
struct rq_cputime *rq_cputime = &per_cpu(rq_cputimes, cpu); struct rq_cputime *rq_cputime = &per_cpu(rq_cputimes, cpu);
struct cputime *prev = &rq_cputime->cpu_prev_time; struct cputime *prev = &rq_cputime->cpu_prev_time;
struct cputime *last = &rq_cputime->cpu_last_time; struct cputime *last = &rq_cputime->cpu_last_time;
u64 ut, st, delta, delta_ut, delta_st; u64 ut, st, hi = 0, si = 0, delta, delta_ut, delta_st;
raw_spin_lock(&rq_cputime->lock); raw_spin_lock(&rq_cputime->lock);
delta = cpu_clock(cpu) - get_idle_time(cpu) #ifdef CONFIG_IRQ_TIME_ACCOUNTING
if (sched_clock_irqtime) {
hi = kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
si = kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
delta = cpu_clock(cpu) - get_idle_time(cpu) - (hi + si)
- (prev->utime + prev->stime); - (prev->utime + prev->stime);
ut = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; ut = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
...@@ -628,11 +641,21 @@ int sched_idle_time_adjust(int cpu, u64 *utime, u64 *stime) ...@@ -628,11 +641,21 @@ int sched_idle_time_adjust(int cpu, u64 *utime, u64 *stime)
unsigned long long sched_get_idle_time(int cpu) unsigned long long sched_get_idle_time(int cpu)
{ {
struct rq_cputime *rt = &per_cpu(rq_cputimes, cpu); struct rq_cputime *rt = &per_cpu(rq_cputimes, cpu);
u64 hi = 0, si = 0;
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
if (sched_clock_irqtime) {
hi = kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ_IDLE];
si = kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ_IDLE];
}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
if (cpu_curr(cpu) == idle_task(cpu)) if (cpu_curr(cpu) == idle_task(cpu))
return rt->sum_idle_time + cpu_clock(cpu) - rt->last_entry_idle; return rt->sum_idle_time +
cpu_clock(cpu) - rt->last_entry_idle -
hi - si;
else else
return rt->sum_idle_time; return rt->sum_idle_time - hi - si;
} }
/* /*
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册