From 2666666a66b4085dbfbbf476d4f3e0f604f8b736 Mon Sep 17 00:00:00 2001
From: Zheng Zucheng
Date: Mon, 29 Nov 2021 15:12:30 +0800
Subject: [PATCH] sched: Introduce qos scheduler for co-location

hulk inclusion
category: feature
bugzilla: 51828, https://gitee.com/openeuler/kernel/issues/I4K96G
CVE: NA

--------------------------------

We introduce the idea of a qos level to the scheduler, which is
backed by different scheduler policies. The qos scheduler changes
the policy of the affected tasks when the qos level of a task group
is modified through the cpu.qos_level cpu cgroup file. In this way
we can satisfy the different needs of tasks at different qos levels.

Signed-off-by: Zhang Qiao
Signed-off-by: Zheng Zucheng
Reviewed-by: Chen Hui
Reviewed-by: Xiu Jianfeng
Signed-off-by: Yang Yingliang
---
 init/Kconfig         |  8 ++++
 kernel/sched/core.c  | 93 ++++++++++++++++++++++++++++++++++++++++++++
 kernel/sched/sched.h |  4 ++
 3 files changed, 105 insertions(+)

diff --git a/init/Kconfig b/init/Kconfig
index c05347a29ca4..a338519692d5 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -777,6 +777,14 @@ menuconfig CGROUP_SCHED
 	  tasks.
 
 if CGROUP_SCHED
+config QOS_SCHED
+	bool "Qos task scheduling"
+	depends on CGROUP_SCHED
+	depends on CFS_BANDWIDTH
+	depends on X86
+
+	default n
+
 config FAIR_GROUP_SCHED
 	bool "Group scheduling for SCHED_OTHER"
 	depends on CGROUP_SCHED
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8866cd7f19c4..23160df884e4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6338,6 +6338,15 @@ void ia64_set_curr_task(int cpu, struct task_struct *p)
 /* task_group_lock serializes the addition/removal of task groups */
 static DEFINE_SPINLOCK(task_group_lock);
 
+#ifdef CONFIG_QOS_SCHED
+static int alloc_qos_sched_group(struct task_group *tg, struct task_group *parent)
+{
+	tg->qos_level = parent->qos_level;
+
+	return 1;
+}
+#endif
+
 static void sched_free_group(struct task_group *tg)
 {
 	free_fair_sched_group(tg);
@@ -6358,6 +6367,11 @@ struct task_group *sched_create_group(struct task_group *parent)
 	if (!alloc_fair_sched_group(tg, parent))
 		goto err;
 
+#ifdef CONFIG_QOS_SCHED
+	if (!alloc_qos_sched_group(tg, parent))
+		goto err;
+#endif
+
 	if (!alloc_rt_sched_group(tg, parent))
 		goto err;
 
@@ -6426,6 +6440,30 @@ static void sched_change_group(struct task_struct *tsk, int type)
 	tg = autogroup_task_group(tsk, tg);
 	tsk->sched_task_group = tg;
 
+#ifdef CONFIG_QOS_SCHED
+	/*
+	 * No need to reset the scheduler policy when a task is exiting or
+	 * the task is in an autogroup.
+	 */
+	if (!rt_task(tsk)
+		&& !(tsk->flags & PF_EXITING)
+		&& !task_group_is_autogroup(tg)) {
+		struct rq *rq = task_rq(tsk);
+		struct sched_attr attr = {
+			.sched_priority	= 0,
+		};
+
+		if (tg->qos_level == -1) {
+			attr.sched_policy = SCHED_IDLE;
+		} else {
+			attr.sched_policy = SCHED_NORMAL;
+		}
+		attr.sched_nice = PRIO_TO_NICE(tsk->static_prio);
+
+		__setscheduler(rq, tsk, &attr, 0);
+	}
+#endif
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	if (tsk->sched_class->task_change_group)
 		tsk->sched_class->task_change_group(tsk, type);
@@ -6886,6 +6924,54 @@ static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
 }
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+#ifdef CONFIG_QOS_SCHED
+static int cpu_qos_write(struct cgroup_subsys_state *css,
+			 struct cftype *cftype, s64 qos_level)
+{
+	struct css_task_iter it;
+	struct task_struct *tsk;
+	struct task_group *tg;
+	struct sched_param param;
+	int pid, policy;
+	tg = css_tg(css);
+
+	if (!tg->se[0])
+		return -EINVAL;
+
+	if (qos_level != -1 && qos_level != 0)
+		return -EINVAL;
+
+	if (tg->qos_level == qos_level)
+		goto done;
+
+	if (qos_level == -1) {
+		policy = SCHED_IDLE;
+	} else {
+		policy = SCHED_NORMAL;
+	}
+
+	tg->qos_level = qos_level;
+
+	param.sched_priority = 0;
+	css_task_iter_start(css, 0, &it);
+	while ((tsk = css_task_iter_next(&it))) {
+		pid = task_tgid_vnr(tsk);
+
+		if (pid > 0 && !rt_task(tsk))
+			sched_setscheduler(tsk, policy, &param);
+	}
+	css_task_iter_end(&it);
+
+done:
+	return 0;
+}
+
+static s64 cpu_qos_read(struct cgroup_subsys_state *css, struct cftype *cft)
+{
+	return css_tg(css)->qos_level;
+}
+#endif /* CONFIG_QOS_SCHED */
+
 static struct cftype cpu_legacy_files[] = {
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	{
@@ -6921,6 +7007,13 @@ static struct cftype cpu_legacy_files[] = {
 		.read_u64 = cpu_rt_period_read_uint,
 		.write_u64 = cpu_rt_period_write_uint,
 	},
+#endif
+#ifdef CONFIG_QOS_SCHED
+	{
+		.name = "qos_level",
+		.read_s64 = cpu_qos_read,
+		.write_s64 = cpu_qos_write,
+	},
 #endif
 	{ }	/* Terminate */
 };
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e6238db9dc99..c263cb2f35c5 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -402,7 +402,11 @@ struct task_group {
 
 	struct cfs_bandwidth cfs_bandwidth;
 
+#if defined(CONFIG_QOS_SCHED) && !defined(__GENKSYMS__)
+	long qos_level;
+#else
 	KABI_RESERVE(1)
+#endif
 	KABI_RESERVE(2)
 };
 
--
GitLab
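
For anyone who wants to try the interface, below is a minimal user-space sketch (not part of the patch) of how the new cpu.qos_level file might be exercised. It assumes the legacy (v1) cpu cgroup hierarchy is mounted at /sys/fs/cgroup/cpu, that a test group named "qos_test" can be created there, and that the program runs as root; write_str() is a helper written for the sketch, not an existing API. Based on cpu_qos_write() and sched_change_group() above, writing -1 should demote the group's member tasks to SCHED_IDLE, and writing 0 should restore SCHED_NORMAL.

/*
 * Not part of the patch: a user-space sketch for trying out the new
 * cpu.qos_level file.  Assumptions: the v1 cpu cgroup hierarchy is
 * mounted at /sys/fs/cgroup/cpu, the group name "qos_test" is free,
 * and the program runs as root.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

/* Local helper: write one value to a cgroup control file. */
static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");
	int ret = 0;

	if (!f) {
		perror(path);
		return -1;
	}
	if (fprintf(f, "%s\n", val) < 0)
		ret = -1;
	if (fclose(f) != 0)
		ret = -1;
	if (ret)
		perror(path);
	return ret;
}

int main(void)
{
	const char *cg = "/sys/fs/cgroup/cpu/qos_test";	/* assumed mount point */
	char path[256], pid[32];

	if (mkdir(cg, 0755) && errno != EEXIST) {
		perror("mkdir");
		return 1;
	}

	/* qos_level -1 marks the group "offline"; member tasks become SCHED_IDLE. */
	snprintf(path, sizeof(path), "%s/cpu.qos_level", cg);
	if (write_str(path, "-1"))
		return 1;

	/* Attach this process; sched_change_group() should apply the policy. */
	snprintf(path, sizeof(path), "%s/cgroup.procs", cg);
	snprintf(pid, sizeof(pid), "%d", getpid());
	if (write_str(path, pid))
		return 1;

	printf("policy now: %d (SCHED_IDLE is %d)\n",
	       sched_getscheduler(0), SCHED_IDLE);
	return 0;
}

Per cpu_qos_write() above, only the values -1 and 0 are accepted, RT tasks are left untouched, and a child group created afterwards inherits the setting through alloc_qos_sched_group().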