Commit 20b83193 authored by Zhang Qiao, committed by Zheng Zengkai

sched/fair: Add qos_throttled_list node in struct cfs_rq

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I50PPU
CVE: NA

-----------------------------------------------------------------

When distribute_cfs_runtime() unthrottles a cfs_rq, another CPU can
re-throttle that cfs_rq in throttle_qos_cfs_rq() before
distribute_cfs_runtime() has read cfs_rq->throttled_list.next to
advance its iterator. The qos throttle path then attaches the same
cfs_rq->throttled_list node to the per-cpu qos_throttled_cfs_rq list,
rewriting cfs_rq->throttled_list.next and causing a panic or hard
lockup in distribute_cfs_runtime().

Fix it by adding a dedicated qos_throttled_list node to struct cfs_rq,
so the qos throttle path no longer reuses cfs_rq->throttled_list.
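
The essence of the fix is that a node which can sit on two lists at
once must carry one list_head per list. Below is a minimal userspace
sketch of why a dedicated second node removes the aliasing; the names
(fake_cfs_rq, bw_throttled, qos_throttled) are illustrative only, not
the actual kernel code:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* Insert "new" right after "head", as the kernel's list_add() does. */
static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

struct fake_cfs_rq {
	int id;
	struct list_head throttled_list;      /* CFS bandwidth list node */
	struct list_head qos_throttled_list;  /* separate node for the qos list */
};

static struct list_head bw_throttled = LIST_HEAD_INIT(bw_throttled);
static struct list_head qos_throttled = LIST_HEAD_INIT(qos_throttled);

int main(void)
{
	struct fake_cfs_rq rq = { .id = 1 };

	/* The bandwidth code queues the cfs_rq on its throttled list... */
	list_add(&rq.throttled_list, &bw_throttled);

	/*
	 * ...and qos throttling queues the *other* node. Pre-fix, this
	 * second list_add() reused rq.throttled_list, silently rewriting
	 * the .next/.prev pointers that a concurrent bandwidth-side
	 * iterator was still following.
	 */
	list_add(&rq.qos_throttled_list, &qos_throttled);

	/* The bandwidth list is untouched by the qos add: */
	printf("bw_throttled.next still points at rq: %d\n",
	       bw_throttled.next == &rq.throttled_list);
	return 0;
}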
Signed-off-by: Zhang Qiao <zhangqiao22@huawei.com>
Reviewed-by: zheng zucheng <zhengzucheng@huawei.com>
Reviewed-by: Chen Hui <judy.chenhui@huawei.com>
Reviewed-by: Wang Weiyang <wangweiyang2@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 729e6a2e
@@ -5385,6 +5385,9 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
 	cfs_rq->runtime_enabled = 0;
 	INIT_LIST_HEAD(&cfs_rq->throttled_list);
+#ifdef CONFIG_QOS_SCHED
+	INIT_LIST_HEAD(&cfs_rq->qos_throttled_list);
+#endif
 }
 
 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
@@ -7204,7 +7207,8 @@ static void throttle_qos_cfs_rq(struct cfs_rq *cfs_rq)
 
 	cfs_rq->throttled = 1;
 	cfs_rq->throttled_clock = rq_clock(rq);
-	list_add(&cfs_rq->throttled_list, &per_cpu(qos_throttled_cfs_rq, cpu_of(rq)));
+	list_add(&cfs_rq->qos_throttled_list,
+		 &per_cpu(qos_throttled_cfs_rq, cpu_of(rq)));
 }
 
 static void unthrottle_qos_cfs_rq(struct cfs_rq *cfs_rq)
@@ -7223,7 +7227,7 @@ static void unthrottle_qos_cfs_rq(struct cfs_rq *cfs_rq)
 	update_rq_clock(rq);
 	cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
 
-	list_del_init(&cfs_rq->throttled_list);
+	list_del_init(&cfs_rq->qos_throttled_list);
 
 	/* update hierarchical throttle state */
 	walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
@@ -7266,7 +7270,7 @@ static int __unthrottle_qos_cfs_rqs(int cpu)
 	int res = 0;
 
 	list_for_each_entry_safe(cfs_rq, tmp_rq, &per_cpu(qos_throttled_cfs_rq, cpu),
-				 throttled_list) {
+				 qos_throttled_list) {
 		if (cfs_rq_throttled(cfs_rq)) {
 			unthrottle_qos_cfs_rq(cfs_rq);
 			res++;
...
@@ -626,8 +626,12 @@ struct cfs_rq {
 #endif /* CONFIG_CFS_BANDWIDTH */
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
+#if !defined(__GENKSYMS__) && defined(CONFIG_QOS_SCHED)
+	struct list_head	qos_throttled_list;
+#else
 	KABI_RESERVE(1)
 	KABI_RESERVE(2)
+#endif
 	KABI_RESERVE(3)
 	KABI_RESERVE(4)
 };
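
The struct cfs_rq hunk also preserves the kernel ABI: __GENKSYMS__ is
defined while genksyms computes exported-symbol CRCs, so the CRC pass
still sees the two reserved slots, while a real build with
CONFIG_QOS_SCHED gets the new list_head in the same storage. A
condensed sketch of the pattern, assuming KABI_RESERVE(n) expands to
an unsigned long placeholder (the real macro may differ):

struct list_head { struct list_head *next, *prev; };

struct example {
#ifdef __GENKSYMS__
	unsigned long kabi_reserve1;	/* genksyms sees the old layout, */
	unsigned long kabi_reserve2;	/* so symbol CRCs stay unchanged */
#else
	struct list_head qos_throttled_list;	/* two pointers: the same
						 * 16 bytes on 64-bit as the
						 * two reserved slots */
#endif
};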