diff --git a/fs/proc/base.c b/fs/proc/base.c
index 5836b4b25052ade966d4d7f8ed29c6a92cebfdd8..22c65289128e36f24fe7243b17db26e1aaa5d99e 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -3298,6 +3298,12 @@ static ssize_t preferred_cpuset_write(struct file *file, const char __user *buf,
 	if (retval < 0)
 		goto out_free_cpumask;
 
+	if (!cpumask_empty(new_mask)) {
+		cpus_read_lock();
+		dynamic_affinity_enable();
+		cpus_read_unlock();
+	}
+
 	retval = count;
 
 out_free_cpumask:
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3b8f72bf0c6bb049022bc297aaebd9e412b9c4a1..3aae225f98a7f343aa921b413553aba650612325 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2227,6 +2227,7 @@
 int set_prefer_cpus_ptr(struct task_struct *p, const struct cpumask *new_mask);
 int sched_prefer_cpus_fork(struct task_struct *p, struct cpumask *mask);
 void sched_prefer_cpus_free(struct task_struct *p);
+void dynamic_affinity_enable(void);
 #endif
 
 #ifdef CONFIG_BPF_SCHED
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 2529a807599bf14522f1ebcd36f686351419d713..90de01cc6827b6eeac74657ef19b02fd184d4ba3 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -734,6 +734,9 @@ static int update_prefer_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 
 	update_tasks_prefer_cpumask(trialcs);
 
+	if (!cpumask_empty(trialcs->prefer_cpus))
+		dynamic_affinity_enable();
+
 	spin_lock_irq(&callback_lock);
 	cpumask_copy(cs->prefer_cpus, trialcs->prefer_cpus);
 	spin_unlock_irq(&callback_lock);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c7b560ffb75edd63b29dfc5b9bc8075dd8e637b8..ff209d25c21cf43b587abc3f3973590329cd98d7 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7067,6 +7067,32 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
 }
 
 #ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
+
+#ifdef CONFIG_JUMP_LABEL
+static DEFINE_STATIC_KEY_FALSE(__dynamic_affinity_used);
+
+static inline bool dynamic_affinity_used(void)
+{
+	return static_branch_unlikely(&__dynamic_affinity_used);
+}
+
+/* Callers must hold cpus_read_lock() (cpuslocked static-key variant). */
+void dynamic_affinity_enable(void)
+{
+	static_branch_enable_cpuslocked(&__dynamic_affinity_used);
+}
+
+#else /* CONFIG_JUMP_LABEL */
+static bool dynamic_affinity_used(void)
+{
+	return true;
+}
+
+/* Empty stub so out-of-file callers still link without jump labels. */
+void dynamic_affinity_enable(void)
+{
+}
+#endif
+
 /*
  * Low utilization threshold for CPU
  *
@@ -7076,6 +7102,9 @@ int sysctl_sched_util_low_pct = 85;
 
 static inline bool prefer_cpus_valid(struct task_struct *p)
 {
+	if (!dynamic_affinity_used())
+		return false;
+
 	return p->prefer_cpus &&
 	       !cpumask_empty(p->prefer_cpus) &&
 	       !cpumask_equal(p->prefer_cpus, p->cpus_ptr) &&