diff --git a/include/linux/sched/grid_qos.h b/include/linux/sched/grid_qos.h
index cea2bf65188068599ecaf8a737b8caa05ebf1f1b..23d08dbb6ae604d176738efa47dd4b66b6961f42 100644
--- a/include/linux/sched/grid_qos.h
+++ b/include/linux/sched/grid_qos.h
@@ -2,6 +2,7 @@
 #ifndef _LINUX_SCHED_GRID_QOS_H
 #define _LINUX_SCHED_GRID_QOS_H
 #include <linux/nodemask.h>
+#include <linux/sched.h>
 
 #ifdef CONFIG_QOS_SCHED_SMART_GRID
 enum sched_grid_qos_class {
@@ -61,6 +62,7 @@ struct sched_grid_qos_power {
 
 struct sched_grid_qos_affinity {
 	nodemask_t mem_preferred_node_mask;
+	const struct cpumask *prefer_cpus;
 };
 
 struct task_struct;
@@ -72,6 +74,11 @@ struct sched_grid_qos {
 	int (*affinity_set)(struct task_struct *p);
 };
 
+static inline int sched_qos_affinity_set(struct task_struct *p)
+{
+	return p->grid_qos->affinity_set(p);
+}
+
 int sched_grid_qos_fork(struct task_struct *p, struct task_struct *orig);
 void sched_grid_qos_free(struct task_struct *p);
 
@@ -88,5 +95,10 @@ sched_grid_preferred_nid(int preferred_nid, nodemask_t *nodemask)
 {
 	return preferred_nid;
 }
+
+static inline int sched_qos_affinity_set(struct task_struct *p)
+{
+	return 0;
+}
 #endif
 #endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a60f58afb28c19daf1c07cb19150cee4c4d2fc43..8a7535c4c7c9d53f23681f90b0c4cf9a74b24570 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6988,9 +6988,6 @@ static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
 int tg_set_dynamic_affinity_mode(struct task_group *tg, u64 mode)
 {
 	struct auto_affinity *auto_affi = tg->auto_affinity;
-	int ret = 0;
-
-	raw_spin_lock_irq(&auto_affi->lock);
 
 	/* auto mode*/
 	if (mode == 1) {
@@ -6998,14 +6995,10 @@ int tg_set_dynamic_affinity_mode(struct task_group *tg, u64 mode)
 	} else if (mode == 0) {
 		stop_auto_affinity(auto_affi);
 	} else {
-		raw_spin_unlock_irq(&auto_affi->lock);
 		return -EINVAL;
 	}
 
-	auto_affi->mode = mode;
-	raw_spin_unlock_irq(&auto_affi->lock);
-
-	return ret;
+	return 0;
 }
 
 static u64 cpu_affinity_mode_read_u64(struct cgroup_subsys_state *css,
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ee153e3c931feb07a7aa612eb3e6e4694e00dac5..d2acee43f656c290f91f6c2c27c5b83c003c37d6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -28,9 +28,7 @@
 #include
 #include
 #endif
-#ifdef CONFIG_QOS_SCHED_SMART_GRID
 #include <linux/sched/grid_qos.h>
-#endif
 #include <trace/events/sched.h>
 
 /*
@@ -5293,6 +5291,7 @@ static unsigned long target_load(int cpu, int type);
 static unsigned long capacity_of(int cpu);
 static int sched_idle_cpu(int cpu);
 static unsigned long weighted_cpuload(struct rq *rq);
+static inline bool prefer_cpus_valid(struct task_struct *p);
 
 int sysctl_affinity_adjust_delay_ms = 5000;
 
@@ -5308,22 +5307,29 @@ static void smart_grid_usage_dec(void)
 	static_key_slow_dec(&__smart_grid_used);
 }
 
-static void tg_update_task_prefer_cpus(struct task_group *tg)
+static inline struct cpumask *task_prefer_cpus(struct task_struct *p)
 {
-	struct affinity_domain *ad = &tg->auto_affinity->ad;
-	struct task_struct *task;
-	struct css_task_iter it;
+	struct affinity_domain *ad;
 
-	css_task_iter_start(&tg->css, 0, &it);
-	while ((task = css_task_iter_next(&it))) {
-		if (tg == &root_task_group && !task->mm)
-			continue;
+	if (!smart_grid_used())
+		return p->prefer_cpus;
 
-		set_prefer_cpus_ptr(task, ad->domains[ad->curr_level]);
-		/* grid_qos must not be NULL */
-		task->grid_qos->affinity_set(task);
-	}
-	css_task_iter_end(&it);
+	if (task_group(p)->auto_affinity->mode == 0)
+		return &p->cpus_allowed;
+
+	ad = &task_group(p)->auto_affinity->ad;
+	return ad->domains[ad->curr_level];
+}
+
+static inline int dynamic_affinity_mode(struct task_struct *p)
+{
+	if (!prefer_cpus_valid(p))
+		return -1;
+
+	if (smart_grid_used())
+		return task_group(p)->auto_affinity->mode == 0 ? -1 : 1;
+
+	return 0;
 }
 
 static void affinity_domain_up(struct task_group *tg)
@@ -5344,8 +5350,6 @@
 
 	if (level == ad->dcount)
 		return;
-
-	tg_update_task_prefer_cpus(tg);
 }
 
 static void affinity_domain_down(struct task_group *tg)
@@ -5366,8 +5370,6 @@
 
 	if (!level)
 		return;
-
-	tg_update_task_prefer_cpus(tg);
 }
 
 static enum hrtimer_restart sched_auto_affi_period_timer(struct hrtimer *timer)
@@ -5433,8 +5435,6 @@ static int tg_update_affinity_domain_down(struct task_group *tg, void *data)
 	if (!smart_grid_used())
 		return 0;
 
-	if (auto_affi->mode)
-		tg_update_task_prefer_cpus(tg);
 	return 0;
 }
 
@@ -5452,35 +5452,41 @@ void tg_update_affinity_domains(int cpu, int online)
 
 void start_auto_affinity(struct auto_affinity *auto_affi)
 {
-	struct task_group *tg = auto_affi->tg;
 	ktime_t delay_ms;
 
-	if (auto_affi->period_active == 1)
+	raw_spin_lock_irq(&auto_affi->lock);
+	if (auto_affi->period_active == 1) {
+		raw_spin_unlock_irq(&auto_affi->lock);
 		return;
-
-	tg_update_task_prefer_cpus(tg);
+	}
 
 	auto_affi->period_active = 1;
+	auto_affi->mode = 1;
 	delay_ms = ms_to_ktime(sysctl_affinity_adjust_delay_ms);
 	hrtimer_forward_now(&auto_affi->period_timer, delay_ms);
 	hrtimer_start_expires(&auto_affi->period_timer,
			      HRTIMER_MODE_ABS_PINNED);
+	raw_spin_unlock_irq(&auto_affi->lock);
+
 	smart_grid_usage_inc();
 }
 
 void stop_auto_affinity(struct auto_affinity *auto_affi)
 {
-	struct task_group *tg = auto_affi->tg;
 	struct affinity_domain *ad = &auto_affi->ad;
 
-	if (auto_affi->period_active == 0)
+	raw_spin_lock_irq(&auto_affi->lock);
+	if (auto_affi->period_active == 0) {
+		raw_spin_unlock_irq(&auto_affi->lock);
 		return;
+	}
 
 	hrtimer_cancel(&auto_affi->period_timer);
 	auto_affi->period_active = 0;
+	auto_affi->mode = 0;
 	ad->curr_level = ad->dcount > 0 ? ad->dcount - 1 : 0;
+	raw_spin_unlock_irq(&auto_affi->lock);
 
-	tg_update_task_prefer_cpus(tg);
 	smart_grid_usage_dec();
 }
 
@@ -5698,6 +5704,19 @@ static void destroy_auto_affinity(struct task_group *tg)
 }
 #else
 static void destroy_auto_affinity(struct task_group *tg) {}
+
+#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
+static inline struct cpumask *task_prefer_cpus(struct task_struct *p)
+{
+	return p->prefer_cpus;
+}
+#endif
+
+static inline int dynamic_affinity_mode(struct task_struct *p)
+{
+	return 0;
+}
+
 #endif
 
 /**************************************************
@@ -7166,10 +7185,11 @@ int sysctl_sched_util_low_pct = 85;
 
 static inline bool prefer_cpus_valid(struct task_struct *p)
 {
-	return p->prefer_cpus &&
-	       !cpumask_empty(p->prefer_cpus) &&
-	       !cpumask_equal(p->prefer_cpus, &p->cpus_allowed) &&
-	       cpumask_subset(p->prefer_cpus, &p->cpus_allowed);
+	struct cpumask *prefer_cpus = task_prefer_cpus(p);
+
+	return !cpumask_empty(prefer_cpus) &&
+	       !cpumask_equal(prefer_cpus, &p->cpus_allowed) &&
+	       cpumask_subset(prefer_cpus, &p->cpus_allowed);
 }
 
 /*
@@ -7193,20 +7213,23 @@ static void set_task_select_cpus(struct task_struct *p, int *idlest_cpu,
 	long min_util = INT_MIN;
 	struct task_group *tg;
 	long spare;
-	int cpu;
+	int cpu, mode;
 
-	p->select_cpus = &p->cpus_allowed;
-	if (!prefer_cpus_valid(p))
+	rcu_read_lock();
+	mode = dynamic_affinity_mode(p);
+	if (mode == -1) {
+		rcu_read_unlock();
 		return;
-
-	if (smart_grid_used()) {
-		p->select_cpus = p->prefer_cpus;
+	} else if (mode == 1) {
+		p->select_cpus = task_prefer_cpus(p);
 		if (idlest_cpu)
 			*idlest_cpu = cpumask_first(p->select_cpus);
+		sched_qos_affinity_set(p);
+		rcu_read_unlock();
 		return;
 	}
 
-	rcu_read_lock();
+	/* manual mode */
 	tg = task_group(p);
 	for_each_cpu(cpu, p->prefer_cpus) {
 		if (unlikely(!tg->se[cpu]))
@@ -7273,7 +7296,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 
 #ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
 	p->select_cpus = &p->cpus_allowed;
-	if (dynamic_affinity_used())
+	if (dynamic_affinity_used() || smart_grid_used())
 		set_task_select_cpus(p, &idlest_cpu, sd_flag);
 #endif
 
@@ -8301,7 +8324,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 
 #ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
 	p->select_cpus = &p->cpus_allowed;
-	if (dynamic_affinity_used())
+	if (dynamic_affinity_used() || smart_grid_used())
 		set_task_select_cpus(p, NULL, 0);
 
 	if (!cpumask_test_cpu(env->dst_cpu, p->select_cpus)) {
diff --git a/kernel/sched/grid/qos.c b/kernel/sched/grid/qos.c
index 525778d5a45d610f9fd541056fd5b8ad54b948e3..f0f10dfb9fd4b4c1c5fb8220c29a57ad62ce026e 100644
--- a/kernel/sched/grid/qos.c
+++ b/kernel/sched/grid/qos.c
@@ -23,20 +23,26 @@
 #include <linux/sched/grid_qos.h>
 #include "internal.h"
 
-static int qos_affinity_set(struct task_struct *p)
+static inline int qos_affinity_set(struct task_struct *p)
 {
 	int n;
 	struct sched_grid_qos_affinity *affinity = &p->grid_qos->affinity;
 
-	nodes_clear(affinity->mem_preferred_node_mask);
+	if (likely(affinity->prefer_cpus == p->select_cpus))
+		return 0;
+
 	/*
 	 * We want the memory allocation to be as close to the CPU
 	 * as possible, and adjust after getting memory bandwidth usage.
 	 */
-	for (n = 0; n < nr_node_ids; n++)
-		if (cpumask_intersects(cpumask_of_node(n), p->prefer_cpus))
+	for (n = 0; n < nr_node_ids; n++) {
+		if (cpumask_intersects(cpumask_of_node(n), p->select_cpus))
 			node_set(n, affinity->mem_preferred_node_mask);
+		else
+			node_clear(n, affinity->mem_preferred_node_mask);
+	}
 
+	affinity->prefer_cpus = p->select_cpus;
 	return 0;
 }
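
Note on the resulting control flow (an illustration, not part of the patch): dynamic_affinity_mode() now returns -1 to skip CPU selection entirely (p->select_cpus stays &p->cpus_allowed), 1 to take the smart-grid path (mask taken from the current affinity-domain level, followed by sched_qos_affinity_set() and an early return), and 0 to fall through to the existing manual dynamic-affinity scan. Below is a minimal user-space sketch of just that three-way decision, with plain parameters standing in for prefer_cpus_valid(p), smart_grid_used() and task_group(p)->auto_affinity->mode (which the kernel code reads under rcu_read_lock()):

  /* mode_sketch.c -- build with: gcc -Wall -o mode_sketch mode_sketch.c */
  #include <stdio.h>
  #include <stdbool.h>

  /* -1: keep cpus_allowed, 1: smart-grid mask, 0: manual scan */
  static int dynamic_affinity_mode(bool prefer_valid, bool grid_used, int mode)
  {
  	if (!prefer_valid)
  		return -1;
  	if (grid_used)
  		return mode == 0 ? -1 : 1;
  	return 0;
  }

  int main(void)
  {
  	printf("%d\n", dynamic_affinity_mode(false, true, 1));  /* -1 */
  	printf("%d\n", dynamic_affinity_mode(true, true, 0));   /* -1 */
  	printf("%d\n", dynamic_affinity_mode(true, true, 1));   /*  1 */
  	printf("%d\n", dynamic_affinity_mode(true, false, 0));  /*  0 */
  	return 0;
  }

Since auto_affi->mode is now written only in start_auto_affinity()/stop_auto_affinity() under auto_affi->lock, tg_set_dynamic_affinity_mode() no longer takes the lock itself.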