Commit 5d42e091 authored by Yang Yingliang, committed by Xie XiuQi

Revert "sched/cputime: use sched idle time accounting"

hulk inclusion
category: bugfix
bugzilla: 13257
CVE: NA

-------------------------------------------------

Avoid a compile error.

This reverts commit 4c289dfb931da642f094f33c44c01d561b62f1c2.
Parent f7b617be
@@ -45,16 +45,14 @@ static u64 get_iowait_time(int cpu)
 #else
-u64 get_idle_time(int cpu)
+static u64 get_idle_time(int cpu)
 {
 	u64 idle, idle_usecs = -1ULL;
 
 	if (cpu_online(cpu))
 		idle_usecs = get_cpu_idle_time_us(cpu, NULL);
 
-	if (idle_usecs == -1ULL && use_sched_idle_time)
-		return sched_get_idle_time(cpu);
-	else if (idle_usecs == -1ULL)
+	if (idle_usecs == -1ULL)
 		/* !NO_HZ or cpu offline so we can rely on cpustat.idle */
 		idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
 	else
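For reference, a minimal userspace model of the three-way fallback the pre-revert get_idle_time() implemented. The helper stubs and their return values are hypothetical stand-ins for the kernel interfaces, not real APIs:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL

static int use_sched_idle_time = 1;  /* models the boot-parameter flag */

/* Hypothetical stubs for the kernel helpers of the same names. */
static uint64_t get_cpu_idle_time_us(int cpu) { return (uint64_t)-1; } /* NO_HZ data unavailable */
static uint64_t sched_get_idle_time(int cpu)  { return 123456789ULL; } /* scheduler-based idle ns */
static uint64_t cpustat_idle(int cpu)         { return 424242ULL; }    /* tick-based cpustat idle */

static uint64_t get_idle_time(int cpu)
{
	uint64_t idle_usecs = get_cpu_idle_time_us(cpu);

	if (idle_usecs == (uint64_t)-1 && use_sched_idle_time)
		return sched_get_idle_time(cpu);     /* NO_HZ failed: use sched accounting */
	else if (idle_usecs == (uint64_t)-1)
		return cpustat_idle(cpu);            /* fallback: tick-sampled cpustat */
	else
		return idle_usecs * NSEC_PER_USEC;   /* normal NO_HZ path */
}

int main(void)
{
	printf("idle: %llu\n", (unsigned long long)get_idle_time(0));
	return 0;
}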
@@ -97,19 +95,9 @@ static int show_stat(struct seq_file *p, void *v)
 	getboottime64(&boottime);
 
 	for_each_possible_cpu(i) {
-		if (use_sched_idle_time && cpu_online(i)) {
-			u64 u = 0, s = 0;
-
-			sched_idle_time_adjust(i, &u, &s);
-			user += u;
-			system += s;
-		} else {
-			user += kcpustat_cpu(i).cpustat[CPUTIME_USER];
-			system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
-		}
+		user += kcpustat_cpu(i).cpustat[CPUTIME_USER];
 		nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
+		system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
 		idle += get_idle_time(i);
 		iowait += get_iowait_time(i);
 		irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
@@ -143,13 +131,9 @@ static int show_stat(struct seq_file *p, void *v)
 	for_each_online_cpu(i) {
 		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
-		if (use_sched_idle_time) {
-			sched_idle_time_adjust(i, &user, &system);
-		} else {
-			user = kcpustat_cpu(i).cpustat[CPUTIME_USER];
-			system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
-		}
+		user = kcpustat_cpu(i).cpustat[CPUTIME_USER];
 		nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
+		system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
 		idle = get_idle_time(i);
 		iowait = get_iowait_time(i);
 		irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
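The counters summed in these two hunks are what show_stat() prints as the "cpu" lines of /proc/stat, in USER_HZ clock ticks. A minimal reader for the aggregate line, with error handling kept to a sketch:

#include <stdio.h>

int main(void)
{
	unsigned long long user, nice, system, idle, iowait, irq, softirq;
	FILE *f = fopen("/proc/stat", "r");

	if (!f)
		return 1;
	if (fscanf(f, "cpu %llu %llu %llu %llu %llu %llu %llu",
		   &user, &nice, &system, &idle, &iowait, &irq, &softirq) == 7)
		printf("idle=%llu iowait=%llu (USER_HZ ticks)\n", idle, iowait);
	fclose(f);
	return 0;
}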
......
@@ -186,9 +186,4 @@ static inline void prev_cputime_init(struct prev_cputime *prev)
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
 
-extern int use_sched_idle_time;
-extern int sched_idle_time_adjust(int cpu, u64 *utime, u64 *stime);
-extern unsigned long long sched_get_idle_time(int cpu);
-extern u64 get_idle_time(int cpu);
-
 #endif /* _LINUX_SCHED_CPUTIME_H */
@@ -22,11 +22,9 @@ static inline void nohz_balance_enter_idle(int cpu) { }
 #endif
 
 #ifdef CONFIG_NO_HZ_COMMON
-extern unsigned long tick_nohz_active;
 void calc_load_nohz_start(void);
 void calc_load_nohz_stop(void);
 #else
-#define tick_nohz_active (0)
 static inline void calc_load_nohz_start(void) { }
 static inline void calc_load_nohz_stop(void) { }
 #endif /* CONFIG_NO_HZ_COMMON */
......
@@ -5913,17 +5913,6 @@ static struct kmem_cache *task_group_cache __read_mostly;
 DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
 DECLARE_PER_CPU(cpumask_var_t, select_idle_mask);
 
-static __init void rq_cputime_init(void)
-{
-	int cpu;
-	struct rq_cputime *rq_cputime;
-
-	for_each_possible_cpu(cpu) {
-		rq_cputime = &per_cpu(rq_cputimes, cpu);
-		raw_spin_lock_init(&rq_cputime->lock);
-	}
-}
-
 void __init sched_init(void)
 {
 	int i, j;
@@ -6085,9 +6074,6 @@ void __init sched_init(void)
 	init_schedstats();
 
-	if (use_sched_idle_time)
-		rq_cputime_init();
-
 	scheduler_running = 1;
 }
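The removed rq_cputime_init() follows the usual pattern of initializing one lock-protected accounting slot per possible CPU before the scheduler starts. A rough userspace analogue, with pthread spinlocks standing in for raw_spinlock_t and a plain array standing in for per-CPU data (NR_CPUS here is an assumed constant):

#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 8   /* stand-in for the possible-CPU count */

struct rq_cputime {
	pthread_spinlock_t lock;
	unsigned long long sum_idle_time;
	unsigned long long last_entry_idle;
};

static struct rq_cputime rq_cputimes[NR_CPUS];  /* models the per-CPU variable */

static void rq_cputime_init(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		pthread_spin_init(&rq_cputimes[cpu].lock, PTHREAD_PROCESS_PRIVATE);
}

int main(void)
{
	rq_cputime_init();
	printf("initialized %d per-CPU slots\n", NR_CPUS);
	return 0;
}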
......
@@ -567,74 +567,6 @@ static u64 scale_stime(u64 stime, u64 rtime, u64 total)
 	return scaled;
 }
 
-int use_sched_idle_time __read_mostly;
-
-DEFINE_PER_CPU_SHARED_ALIGNED(struct rq_cputime, rq_cputimes);
-
-static int __init sched_idle_time_setup(char *str)
-{
-	use_sched_idle_time = 1;
-	return 0;
-}
-
-early_param("use-sched-idle-time", sched_idle_time_setup);
-
-int sched_idle_time_adjust(int cpu, u64 *utime, u64 *stime)
-{
-	struct rq_cputime *rq_cputime = &per_cpu(rq_cputimes, cpu);
-	struct cputime *prev = &rq_cputime->cpu_prev_time;
-	struct cputime *last = &rq_cputime->cpu_last_time;
-	u64 ut, st, delta, delta_ut, delta_st;
-
-	raw_spin_lock(&rq_cputime->lock);
-
-	delta = cpu_clock(cpu) - get_idle_time(cpu)
-			- (prev->utime + prev->stime);
-
-	ut = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
-	st = kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
-	delta_ut = ut - last->utime;
-	delta_st = st - last->stime;
-
-	if (unlikely((s64)delta <= 0))
-		goto out;
-
-	if (delta_st == 0) {
-		prev->utime += delta;
-	} else if (delta_ut == 0) {
-		prev->stime += delta;
-	} else {
-		delta_st = scale_stime(delta_st, delta, delta_ut + delta_st);
-		if (unlikely(delta_st > delta))
-			delta_st = delta;
-		prev->stime += delta_st;
-		prev->utime += delta - delta_st;
-	}
-
-out:
-	last->utime = ut;
-	last->stime = st;
-
-	*utime = prev->utime;
-	*stime = prev->stime;
-
-	raw_spin_unlock(&rq_cputime->lock);
-
-	return 0;
-}
-
-unsigned long long sched_get_idle_time(int cpu)
-{
-	struct rq_cputime *rt = &per_cpu(rq_cputimes, cpu);
-
-	if (is_idle_task(curr_task(cpu)))
-		return rt->sum_idle_time + cpu_clock(cpu) - rt->last_entry_idle;
-	else
-		return rt->sum_idle_time;
-}
-
 /*
  * Adjust tick based cputime random precision against scheduler runtime
  * accounting.
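The core of the reverted sched_idle_time_adjust() is a proportional split: delta, the wall-clock time that was neither idle nor previously accounted, is divided between user and system in the ratio of the tick-based deltas. A worked userspace model, where the kernel's overflow-safe scale_stime() is simplified to a plain multiply-divide under the assumption that the 64-bit product does not overflow:

#include <stdint.h>
#include <stdio.h>

static uint64_t scale_stime(uint64_t stime, uint64_t rtime, uint64_t total)
{
	/* stime * rtime / total; the real helper avoids 64-bit overflow */
	return total ? stime * rtime / total : 0;
}

int main(void)
{
	uint64_t delta = 1000;   /* non-idle wall-clock ns to distribute */
	uint64_t delta_ut = 30;  /* tick-based user delta   */
	uint64_t delta_st = 10;  /* tick-based system delta */

	uint64_t st = scale_stime(delta_st, delta, delta_ut + delta_st);
	if (st > delta)          /* clamp, as the reverted code did */
		st = delta;
	printf("user += %llu, system += %llu\n",
	       (unsigned long long)(delta - st), (unsigned long long)st);
	/* prints: user += 750, system += 250 */
	return 0;
}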
......
@@ -919,21 +919,6 @@ struct rq {
 #endif
 };
 
-struct cputime {
-	u64 utime;
-	u64 stime;
-};
-
-struct rq_cputime {
-	raw_spinlock_t lock;
-	unsigned long long sum_idle_time;
-	unsigned long long last_entry_idle;
-	struct cputime cpu_prev_time;
-	struct cputime cpu_last_time;
-};
-
-DECLARE_PER_CPU(struct rq_cputime, rq_cputimes);
-
 static inline int cpu_of(struct rq *rq)
 {
 #ifdef CONFIG_SMP
......
 /* SPDX-License-Identifier: GPL-2.0 */
-#include <linux/sched/cputime.h>
 
 #ifdef CONFIG_SCHEDSTATS
@@ -154,20 +153,6 @@ __sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct
 static inline void
 sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
 {
-	if (use_sched_idle_time && !tick_nohz_active) {
-		struct rq *rq = task_rq(prev);
-		struct rq_cputime *rq_cputime = this_cpu_ptr(&rq_cputimes);
-		unsigned long long now = cpu_clock(cpu_of(rq)), delta = 0;
-
-		if (prev == rq->idle) {
-			delta = now - rq_cputime->last_entry_idle;
-			rq_cputime->sum_idle_time += delta;
-		}
-
-		if (next == rq->idle)
-			rq_cputime->last_entry_idle = now;
-	}
-
 	if (unlikely(sched_info_on()))
 		__sched_info_switch(rq, prev, next);
 }
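What the removed hook in sched_info_switch() did, modeled in userspace: stamp the clock when the CPU switches to the idle task, and fold the elapsed interval into sum_idle_time when it switches away. The prev_is_idle/next_is_idle flags stand in for the prev == rq->idle and next == rq->idle checks:

#include <stdint.h>
#include <stdio.h>

struct rq_cputime {
	uint64_t sum_idle_time;
	uint64_t last_entry_idle;
};

static void on_context_switch(struct rq_cputime *rc, uint64_t now,
			      int prev_is_idle, int next_is_idle)
{
	if (prev_is_idle)
		rc->sum_idle_time += now - rc->last_entry_idle;
	if (next_is_idle)
		rc->last_entry_idle = now;
}

int main(void)
{
	struct rq_cputime rc = { 0, 0 };

	on_context_switch(&rc, 100, 0, 1);   /* enter idle at t=100 */
	on_context_switch(&rc, 160, 1, 0);   /* leave idle at t=160 */
	printf("idle ns: %llu\n", (unsigned long long)rc.sum_idle_time); /* 60 */
	return 0;
}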
......
@@ -149,12 +149,14 @@ static inline void tick_nohz_init(void) { }
 #endif
 
 #ifdef CONFIG_NO_HZ_COMMON
+extern unsigned long tick_nohz_active;
 extern void timers_update_nohz(void);
 # ifdef CONFIG_SMP
 extern struct static_key_false timers_migration_enabled;
 # endif
 #else /* CONFIG_NO_HZ_COMMON */
 static inline void timers_update_nohz(void) { }
+#define tick_nohz_active (0)
 #endif
 
 DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
......