提交 5bbd0d05 编写于 作者: C Chengming Zhou 提交者: Zheng Zengkai

sched/fair: Fix cfs_rq_clock_pelt() for throttled cfs_rq

stable inclusion
from stable-v5.10.121
commit 147a376c1afea117eccda36451121ea781aa5028
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I5L6CQ

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=147a376c1afea117eccda36451121ea781aa5028

--------------------------------

[ Upstream commit 64eaf507 ]

Since commit 23127296 ("sched/fair: Update scale invariance of PELT")
change to use rq_clock_pelt() instead of rq_clock_task(), we should also
use rq_clock_pelt() for throttled_clock_task_time and throttled_clock_task
accounting to get correct cfs_rq_clock_pelt() of throttled cfs_rq. And
rename throttled_clock_task(_time) to be clock_pelt rather than clock_task.

Fixes: 23127296 ("sched/fair: Update scale invariance of PELT")
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Ben Segall <bsegall@google.com>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lore.kernel.org/r/20220408115309.81603-1-zhouchengming@bytedance.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Acked-by: Xie XiuQi <xiexiuqi@huawei.com>
上级 91d20b0c
...@@ -4862,8 +4862,8 @@ static int tg_unthrottle_up(struct task_group *tg, void *data) ...@@ -4862,8 +4862,8 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
cfs_rq->throttle_count--; cfs_rq->throttle_count--;
if (!cfs_rq->throttle_count) { if (!cfs_rq->throttle_count) {
cfs_rq->throttled_clock_task_time += rq_clock_task(rq) - cfs_rq->throttled_clock_pelt_time += rq_clock_pelt(rq) -
cfs_rq->throttled_clock_task; cfs_rq->throttled_clock_pelt;
/* Add cfs_rq with already running entity in the list */ /* Add cfs_rq with already running entity in the list */
if (cfs_rq->nr_running >= 1) if (cfs_rq->nr_running >= 1)
...@@ -4880,7 +4880,7 @@ static int tg_throttle_down(struct task_group *tg, void *data) ...@@ -4880,7 +4880,7 @@ static int tg_throttle_down(struct task_group *tg, void *data)
/* group is entering throttled state, stop time */ /* group is entering throttled state, stop time */
if (!cfs_rq->throttle_count) { if (!cfs_rq->throttle_count) {
cfs_rq->throttled_clock_task = rq_clock_task(rq); cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq);
list_del_leaf_cfs_rq(cfs_rq); list_del_leaf_cfs_rq(cfs_rq);
} }
cfs_rq->throttle_count++; cfs_rq->throttle_count++;
...@@ -5306,7 +5306,7 @@ static void sync_throttle(struct task_group *tg, int cpu) ...@@ -5306,7 +5306,7 @@ static void sync_throttle(struct task_group *tg, int cpu)
pcfs_rq = tg->parent->cfs_rq[cpu]; pcfs_rq = tg->parent->cfs_rq[cpu];
cfs_rq->throttle_count = pcfs_rq->throttle_count; cfs_rq->throttle_count = pcfs_rq->throttle_count;
cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu)); cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu));
} }
/* conditionally throttle active cfs_rq's from put_prev_entity() */ /* conditionally throttle active cfs_rq's from put_prev_entity() */
......
...@@ -145,9 +145,9 @@ static inline u64 rq_clock_pelt(struct rq *rq) ...@@ -145,9 +145,9 @@ static inline u64 rq_clock_pelt(struct rq *rq)
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{ {
if (unlikely(cfs_rq->throttle_count)) if (unlikely(cfs_rq->throttle_count))
return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time; return cfs_rq->throttled_clock_pelt - cfs_rq->throttled_clock_pelt_time;
return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time; return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_pelt_time;
} }
#else #else
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
......
...@@ -625,8 +625,8 @@ struct cfs_rq { ...@@ -625,8 +625,8 @@ struct cfs_rq {
s64 runtime_remaining; s64 runtime_remaining;
u64 throttled_clock; u64 throttled_clock;
u64 throttled_clock_task; u64 throttled_clock_pelt;
u64 throttled_clock_task_time; u64 throttled_clock_pelt_time;
int throttled; int throttled;
int throttle_count; int throttle_count;
struct list_head throttled_list; struct list_head throttled_list;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册