Commit 0af02522 authored by Hui Tang

sched: Add feature 'UTIL_TASKGROUP' for dynamic affinity

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I526XC

--------------------------------

If the feature is enabled, the util_avg of the bottom-level task group is used;
otherwise, the util_avg of the rq's cfs_rq is used.
Signed-off-by: Hui Tang <tanghui20@huawei.com>
Parent 1c9231cb
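
For reference, a stand-alone user-space sketch of the spare-capacity choice the
patch changes. The arrays, the numbers and the da_util_taskgroup flag are made
up for illustration; only the fallback in taskgroup_cpu_util() mirrors the
kernel change (use the bottom-level group's util_avg when the feature is on and
the group has a sched_entity on that CPU, otherwise fall back to the CPU-wide
utilization):

/*
 * Illustrative sketch, not kernel code: kernel structures are replaced by
 * plain arrays with made-up values.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static bool da_util_taskgroup = true;          /* stands in for sched_feat(DA_UTIL_TASKGROUP) */
static bool tg_has_se[NR_CPUS] = { true, true, true, true };  /* tg->se[cpu] != NULL */
static long tg_util[NR_CPUS]   = { 100, 300,  50, 700 };      /* tg->se[cpu]->avg.util_avg */
static long rq_util[NR_CPUS]   = { 400, 350, 600, 800 };      /* cpu_util(cpu): whole-rq view */
static long capacity[NR_CPUS]  = { 1024, 1024, 1024, 1024 };  /* capacity_of(cpu) */

static long taskgroup_cpu_util(int cpu)
{
	if (tg_has_se[cpu] && da_util_taskgroup)
		return tg_util[cpu];
	return rq_util[cpu];
}

int main(void)
{
	long best_spare = -1;	/* called min_util in the kernel code */
	int idlest_cpu = -1;

	/*
	 * Mirrors the non-idle branch of set_task_select_cpus(): pick the
	 * preferred CPU with the largest spare capacity under the chosen
	 * utilization metric.
	 */
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		long spare = capacity[cpu] - taskgroup_cpu_util(cpu);

		if (spare > best_spare) {
			best_spare = spare;
			idlest_cpu = cpu;
		}
	}

	printf("idlest preferred cpu: %d (spare %ld)\n", idlest_cpu, best_spare);
	return 0;
}

Flipping da_util_taskgroup to false reproduces the old behaviour, where the
whole-rq utilization decides; with it set, only the bottom-level group's own
load is charged against each preferred CPU.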
@@ -7106,6 +7106,17 @@ static inline bool prefer_cpus_valid(struct task_struct *p)
 	       cpumask_subset(p->prefer_cpus, p->cpus_ptr);
 }
 
+static inline unsigned long taskgroup_cpu_util(struct task_group *tg,
+					       int cpu)
+{
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	if (tg->se[cpu] && sched_feat(DA_UTIL_TASKGROUP))
+		return tg->se[cpu]->avg.util_avg;
+#endif
+
+	return cpu_util(cpu);
+}
+
 /*
  * set_task_select_cpus: select the cpu range for task
  * @p: the task whose available cpu range will to set
@@ -7136,13 +7147,11 @@ static void set_task_select_cpus(struct task_struct *p, int *idlest_cpu,
 	rcu_read_lock();
 	tg = task_group(p);
 	for_each_cpu(cpu, p->prefer_cpus) {
-		if (unlikely(!tg->se[cpu]))
-			continue;
-
 		if (idlest_cpu && available_idle_cpu(cpu)) {
 			*idlest_cpu = cpu;
 		} else if (idlest_cpu) {
-			spare = (long)(capacity_of(cpu) - tg->se[cpu]->avg.util_avg);
+			spare = (long)(capacity_of(cpu) -
+				       taskgroup_cpu_util(tg, cpu));
 			if (spare > min_util) {
 				min_util = spare;
 				*idlest_cpu = cpu;
@@ -7157,7 +7166,7 @@ static void set_task_select_cpus(struct task_struct *p, int *idlest_cpu,
 			return;
 		}
 
-		util_avg_sum += tg->se[cpu]->avg.util_avg;
+		util_avg_sum += taskgroup_cpu_util(tg, cpu);
 		tg_capacity += capacity_of(cpu);
 	}
 	rcu_read_unlock();
@@ -106,3 +106,10 @@ SCHED_FEAT(UTIL_EST_FASTUP, true)
 SCHED_FEAT(ALT_PERIOD, true)
 SCHED_FEAT(BASE_SLICE, true)
+
+#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
+/*
+ * Use util_avg of bottom-level taskgroup
+ */
+SCHED_FEAT(DA_UTIL_TASKGROUP, true)
+#endif
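
Since the SCHED_FEAT() entry defaults to true, the task-group utilization is
used out of the box whenever both config options referenced in the patch
(CONFIG_QOS_SCHED_DYNAMIC_AFFINITY and CONFIG_FAIR_GROUP_SCHED) are enabled.
Like other scheduler features, it should also be switchable at run time on
debug builds, e.g. by writing NO_DA_UTIL_TASKGROUP to the sched_features file
under /sys/kernel/debug (the exact path depends on the kernel version); this is
the standard sched_feat mechanism, not anything added by this patch.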