提交 b3cf6ed8 编写于 作者: S Steve Sistare 提交者: Cheng Jian

sched/topology: Provide cfs_overload_cpus bitmap

hulk inclusion
category: feature
bugzilla: 38261, https://bugzilla.openeuler.org/show_bug.cgi?id=23
CVE: NA

---------------------------

Define and initialize a sparse bitmap of overloaded CPUs, per
last-level-cache scheduling domain, for use by the CFS scheduling class.
Save a pointer to cfs_overload_cpus in the rq for efficient access.
Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
上级 ef175d23
...@@ -72,6 +72,7 @@ struct sched_domain_shared { ...@@ -72,6 +72,7 @@ struct sched_domain_shared {
atomic_t ref; atomic_t ref;
atomic_t nr_busy_cpus; atomic_t nr_busy_cpus;
int has_idle_cores; int has_idle_cores;
struct sparsemask *cfs_overload_cpus;
}; };
struct sched_domain { struct sched_domain {
......
...@@ -83,6 +83,7 @@ ...@@ -83,6 +83,7 @@
struct rq; struct rq;
struct cpuidle_state; struct cpuidle_state;
struct sparsemask;
/* task_struct::on_rq states: */ /* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED 1 #define TASK_ON_RQ_QUEUED 1
...@@ -829,6 +830,7 @@ struct rq { ...@@ -829,6 +830,7 @@ struct rq {
struct cfs_rq cfs; struct cfs_rq cfs;
struct rt_rq rt; struct rt_rq rt;
struct dl_rq dl; struct dl_rq dl;
struct sparsemask *cfs_overload_cpus;
#ifdef CONFIG_FAIR_GROUP_SCHED #ifdef CONFIG_FAIR_GROUP_SCHED
/* list of leaf cfs_rq on this CPU: */ /* list of leaf cfs_rq on this CPU: */
......
...@@ -3,6 +3,7 @@ ...@@ -3,6 +3,7 @@
* Scheduler topology setup/handling methods * Scheduler topology setup/handling methods
*/ */
#include "sched.h" #include "sched.h"
#include "sparsemask.h"
DEFINE_MUTEX(sched_domains_mutex); DEFINE_MUTEX(sched_domains_mutex);
...@@ -409,7 +410,9 @@ DEFINE_PER_CPU(struct sched_domain *, sd_asym); ...@@ -409,7 +410,9 @@ DEFINE_PER_CPU(struct sched_domain *, sd_asym);
static void update_top_cache_domain(int cpu) static void update_top_cache_domain(int cpu)
{ {
struct sparsemask *cfs_overload_cpus = NULL;
struct sched_domain_shared *sds = NULL; struct sched_domain_shared *sds = NULL;
struct rq *rq = cpu_rq(cpu);
struct sched_domain *sd; struct sched_domain *sd;
int id = cpu; int id = cpu;
int size = 1; int size = 1;
...@@ -419,8 +422,10 @@ static void update_top_cache_domain(int cpu) ...@@ -419,8 +422,10 @@ static void update_top_cache_domain(int cpu)
id = cpumask_first(sched_domain_span(sd)); id = cpumask_first(sched_domain_span(sd));
size = cpumask_weight(sched_domain_span(sd)); size = cpumask_weight(sched_domain_span(sd));
sds = sd->shared; sds = sd->shared;
cfs_overload_cpus = sds->cfs_overload_cpus;
} }
rcu_assign_pointer(rq->cfs_overload_cpus, cfs_overload_cpus);
rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
per_cpu(sd_llc_size, cpu) = size; per_cpu(sd_llc_size, cpu) = size;
per_cpu(sd_llc_id, cpu) = id; per_cpu(sd_llc_id, cpu) = id;
...@@ -1613,7 +1618,22 @@ static void __sdt_free(const struct cpumask *cpu_map) ...@@ -1613,7 +1618,22 @@ static void __sdt_free(const struct cpumask *cpu_map)
static int sd_llc_alloc(struct sched_domain *sd) static int sd_llc_alloc(struct sched_domain *sd)
{ {
/* Allocate sd->shared data here. Empty for now. */ struct sched_domain_shared *sds = sd->shared;
struct cpumask *span = sched_domain_span(sd);
int nid = cpu_to_node(cpumask_first(span));
int flags = __GFP_ZERO | GFP_KERNEL;
struct sparsemask *mask;
/*
* Allocate the bitmap if not already allocated. This is called for
* every CPU in the LLC but only allocates once per sd_llc_shared.
*/
if (!sds->cfs_overload_cpus) {
mask = sparsemask_alloc_node(nr_cpu_ids, 3, flags, nid);
if (!mask)
return 1;
sds->cfs_overload_cpus = mask;
}
return 0; return 0;
} }
...@@ -1625,7 +1645,8 @@ static void sd_llc_free(struct sched_domain *sd) ...@@ -1625,7 +1645,8 @@ static void sd_llc_free(struct sched_domain *sd)
if (!sds) if (!sds)
return; return;
/* Free data here. Empty for now. */ sparsemask_free(sds->cfs_overload_cpus);
sds->cfs_overload_cpus = NULL;
} }
static int sd_llc_alloc_all(const struct cpumask *cpu_map, struct s_data *d) static int sd_llc_alloc_all(const struct cpumask *cpu_map, struct s_data *d)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册