You need to sign in or sign up before continuing.
Commit 79bec4c6 authored by Steve Sistare, committed by Zheng Zengkai

sched/topology: Provide hooks to allocate data shared per LLC

hulk inclusion
category: feature
bugzilla: 38261, https://gitee.com/openeuler/kernel/issues/I49XPZ
CVE: NA

---------------------------

Add functions sd_llc_alloc_all() and sd_llc_free_all() to allocate and
free data pointed to by struct sched_domain_shared at the last-level-cache
domain.  sd_llc_alloc_all() is called after the SD hierarchy is known, to
eliminate the unnecessary allocations that would occur if we instead
allocated in __sdt_alloc() and then figured out which shared nodes are
redundant.
Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Reviewed-by: Chen Hui <judy.chenhui@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent bf693f72
...@@ -10,6 +10,12 @@ DEFINE_MUTEX(sched_domains_mutex); ...@@ -10,6 +10,12 @@ DEFINE_MUTEX(sched_domains_mutex);
static cpumask_var_t sched_domains_tmpmask; static cpumask_var_t sched_domains_tmpmask;
static cpumask_var_t sched_domains_tmpmask2; static cpumask_var_t sched_domains_tmpmask2;
struct s_data;
static int sd_llc_alloc(struct sched_domain *sd);
static void sd_llc_free(struct sched_domain *sd);
static int sd_llc_alloc_all(const struct cpumask *cpu_map, struct s_data *d);
static void sd_llc_free_all(const struct cpumask *cpu_map);
#ifdef CONFIG_SCHED_DEBUG #ifdef CONFIG_SCHED_DEBUG
static int __init sched_debug_setup(char *str) static int __init sched_debug_setup(char *str)
...@@ -596,8 +602,10 @@ static void destroy_sched_domain(struct sched_domain *sd) ...@@ -596,8 +602,10 @@ static void destroy_sched_domain(struct sched_domain *sd)
*/ */
free_sched_groups(sd->groups, 1); free_sched_groups(sd->groups, 1);
if (sd->shared && atomic_dec_and_test(&sd->shared->ref)) if (sd->shared && atomic_dec_and_test(&sd->shared->ref)) {
sd_llc_free(sd);
kfree(sd->shared); kfree(sd->shared);
}
kfree(sd); kfree(sd);
} }
...@@ -1238,6 +1246,7 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what, ...@@ -1238,6 +1246,7 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
free_percpu(d->sd); free_percpu(d->sd);
fallthrough; fallthrough;
case sa_sd_storage: case sa_sd_storage:
sd_llc_free_all(cpu_map);
__sdt_free(cpu_map); __sdt_free(cpu_map);
fallthrough; fallthrough;
case sa_none: case sa_none:
...@@ -1849,6 +1858,62 @@ static void __sdt_free(const struct cpumask *cpu_map) ...@@ -1849,6 +1858,62 @@ static void __sdt_free(const struct cpumask *cpu_map)
} }
} }
/*
 * Allocate the data hanging off @sd->shared for one last-level-cache
 * domain.
 *
 * Placeholder: nothing is allocated yet; later patches hook their
 * per-LLC allocations in here.
 *
 * Returns 0 on success, nonzero on allocation failure.
 */
static int sd_llc_alloc(struct sched_domain *sd)
{
	return 0;
}
/*
 * Release the data hanging off @sd->shared, if any.
 *
 * Placeholder: no members are allocated yet, so beyond the NULL guard
 * there is nothing to release; later patches hook their frees in here.
 */
static void sd_llc_free(struct sched_domain *sd)
{
	if (!sd->shared)
		return;

	/* Free sd->shared members here. Empty for now. */
}
/*
 * For every CPU in @cpu_map, locate its highest domain that still shares
 * package resources -- i.e. its last-level-cache domain -- and allocate
 * the shared data for it.
 *
 * Called after the full sched-domain hierarchy in @d is built, so only
 * the shared nodes actually in use are allocated (avoiding the redundant
 * allocations that doing this in __sdt_alloc() would incur).
 *
 * Returns 0 on success, 1 if an allocation failed.
 */
static int sd_llc_alloc_all(const struct cpumask *cpu_map, struct s_data *d)
{
	struct sched_domain *sd, *llc;
	int cpu;

	for_each_cpu(cpu, cpu_map) {
		/* Walk up while the domain still shares package resources. */
		llc = NULL;
		sd = *per_cpu_ptr(d->sd, cpu);
		while (sd && (sd->flags & SD_SHARE_PKG_RESOURCES)) {
			llc = sd;
			sd = sd->parent;
		}

		if (llc && sd_llc_alloc(llc))
			return 1;
	}

	return 0;
}
/*
 * Free the per-LLC shared data of every domain reachable from the CPUs
 * in @cpu_map, across all topology levels.
 *
 * Runs on the teardown/error path (from __free_domain_allocs()), which
 * can be reached after a partially failed __sdt_alloc(), so a level's
 * percpu sd pointer array may not exist yet.
 */
static void sd_llc_free_all(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	struct sched_domain *sd;
	struct sd_data *sdd;
	int j;

	for_each_sd_topology(tl) {
		sdd = &tl->data;
		/*
		 * &tl->data is never NULL, so checking sdd itself was dead
		 * code.  The meaningful guard is whether this level's percpu
		 * array was allocated at all (it may not be after a failed
		 * __sdt_alloc()); otherwise per_cpu_ptr() below would
		 * dereference NULL.  Mirrors the check in __sdt_free().
		 */
		if (!sdd->sd)
			continue;
		for_each_cpu(j, cpu_map) {
			sd = *per_cpu_ptr(sdd->sd, j);
			if (sd)
				sd_llc_free(sd);
		}
	}
}
static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
const struct cpumask *cpu_map, struct sched_domain_attr *attr, const struct cpumask *cpu_map, struct sched_domain_attr *attr,
struct sched_domain *child, int dflags, int cpu) struct sched_domain *child, int dflags, int cpu)
...@@ -2049,6 +2114,14 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att ...@@ -2049,6 +2114,14 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
} }
} }
/*
* Allocate shared sd data at last level cache. Must be done after
* domains are built above, but before the data is used in
* cpu_attach_domain and descendants below.
*/
if (sd_llc_alloc_all(cpu_map, &d))
goto error;
/* Attach the domains */ /* Attach the domains */
rcu_read_lock(); rcu_read_lock();
for_each_cpu(i, cpu_map) { for_each_cpu(i, cpu_map) {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册