diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 15f3f61f7e3b734fe5f80f636663d63931ba74be..3d04d4505fdc76cdd6bd5cfa61e2d138191f3e2c 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -72,6 +72,7 @@ struct sched_domain_shared {
 	atomic_t	ref;
 	atomic_t	nr_busy_cpus;
 	int		has_idle_cores;
+	struct sparsemask *cfs_overload_cpus;
 };
 
 struct sched_domain {
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index f8c29f1af2d086ec18fbf9933a1111ae71380e1d..1043d840f06f9f39c60f4f1a2d0ab2b1fc6be15a 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -83,6 +83,7 @@
 
 struct rq;
 struct cpuidle_state;
+struct sparsemask;
 
 /* task_struct::on_rq states: */
 #define TASK_ON_RQ_QUEUED	1
@@ -829,6 +830,7 @@ struct rq {
 	struct cfs_rq		cfs;
 	struct rt_rq		rt;
 	struct dl_rq		dl;
+	struct sparsemask	*cfs_overload_cpus;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* list of leaf cfs_rq on this CPU: */
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 4012acc5adcf70616da35182009e5e1f925a7b83..5ca6998d05476dfd85b98f56b7252c169bf5dce4 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -3,6 +3,7 @@
  * Scheduler topology setup/handling methods
  */
 #include "sched.h"
+#include "sparsemask.h"
 
 DEFINE_MUTEX(sched_domains_mutex);
 
@@ -409,7 +410,9 @@ DEFINE_PER_CPU(struct sched_domain *, sd_asym);
 
 static void update_top_cache_domain(int cpu)
 {
+	struct sparsemask *cfs_overload_cpus = NULL;
 	struct sched_domain_shared *sds = NULL;
+	struct rq *rq = cpu_rq(cpu);
 	struct sched_domain *sd;
 	int id = cpu;
 	int size = 1;
@@ -419,8 +422,10 @@ static void update_top_cache_domain(int cpu)
 		id = cpumask_first(sched_domain_span(sd));
 		size = cpumask_weight(sched_domain_span(sd));
 		sds = sd->shared;
+		cfs_overload_cpus = sds->cfs_overload_cpus;
 	}
 
+	rcu_assign_pointer(rq->cfs_overload_cpus, cfs_overload_cpus);
 	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
 	per_cpu(sd_llc_size, cpu) = size;
 	per_cpu(sd_llc_id, cpu) = id;
@@ -1613,7 +1618,22 @@ static void __sdt_free(const struct cpumask *cpu_map)
 
 static int sd_llc_alloc(struct sched_domain *sd)
 {
-	/* Allocate sd->shared data here. Empty for now. */
+	struct sched_domain_shared *sds = sd->shared;
+	struct cpumask *span = sched_domain_span(sd);
+	int nid = cpu_to_node(cpumask_first(span));
+	int flags = __GFP_ZERO | GFP_KERNEL;
+	struct sparsemask *mask;
+
+	/*
+	 * Allocate the bitmap if not already allocated. This is called for
+	 * every CPU in the LLC but only allocates once per sd_llc_shared.
+	 */
+	if (!sds->cfs_overload_cpus) {
+		mask = sparsemask_alloc_node(nr_cpu_ids, 3, flags, nid);
+		if (!mask)
+			return 1;
+		sds->cfs_overload_cpus = mask;
+	}
 
 	return 0;
 }
@@ -1625,7 +1645,8 @@ static void sd_llc_free(struct sched_domain *sd)
 	if (!sds)
 		return;
 
-	/* Free data here. Empty for now. */
+	sparsemask_free(sds->cfs_overload_cpus);
+	sds->cfs_overload_cpus = NULL;
 }
 
 static int sd_llc_alloc_all(const struct cpumask *cpu_map, struct s_data *d)
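
For reference, a minimal sketch of how a later fair-class patch in this series might maintain the bitmap from the enqueue/dequeue paths. The helper names sparsemask_set_elem() and sparsemask_clear_elem() are assumptions based on the companion sparsemask patch, not something this patch defines; exact signatures may differ. Since rq->cfs_overload_cpus is published with rcu_assign_pointer() above, readers pair the load with rcu_dereference() under rcu_read_lock():

/*
 * Illustrative sketch only, not part of this patch.  Assumes the
 * sparsemask API provides sparsemask_set_elem()/sparsemask_clear_elem()
 * taking (mask, element); names and signatures are unverified here.
 */
static void overload_set(struct rq *rq)
{
	struct sparsemask *overload_cpus;

	rcu_read_lock();
	overload_cpus = rcu_dereference(rq->cfs_overload_cpus);
	if (overload_cpus)
		sparsemask_set_elem(overload_cpus, rq->cpu);
	rcu_read_unlock();
}

static void overload_clear(struct rq *rq)
{
	struct sparsemask *overload_cpus;

	rcu_read_lock();
	overload_cpus = rcu_dereference(rq->cfs_overload_cpus);
	if (overload_cpus)
		sparsemask_clear_elem(overload_cpus, rq->cpu);
	rcu_read_unlock();
}

The NULL check covers CPUs whose top cache domain has no sd_llc_shared (cfs_overload_cpus stays NULL in update_top_cache_domain() in that case), and the RCU read section keeps the mask valid if the topology is rebuilt concurrently.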