diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index cd2b767bbff809f8dec8e71e136775d7e4c85cf3..271847c1b4d8285dce22cbce3da3734b085e09b6 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -108,12 +108,12 @@
 extern int sysctl_blocked_averages(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp, loff_t *ppos);
 extern int sysctl_tick_update_load(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp, loff_t *ppos);
-extern int sysctl_update_load_tracking_aware(struct ctl_table *table,
-		int write, void __user *buffer, size_t *lenp, loff_t *ppos);
+extern int sysctl_update_load_latency(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp, loff_t *ppos);
+extern unsigned int sysctl_load_tracking_latency;
 extern struct static_key_true sched_tick_update_load;
 extern struct static_key_true sched_blocked_averages;
-extern struct static_key_false sched_load_tracking_aware_enable;
 #endif
 
 #endif /* _LINUX_SCHED_SYSCTL_H */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e5cf15fb9e84d552141a3f9b3e7edeb19a965f19..879cdcb243d145769c3a0a743f7fe8dc8ad58d81 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -45,35 +45,34 @@
 unsigned int sysctl_sched_latency			= 6000000ULL;
 static unsigned int normalized_sysctl_sched_latency	= 6000000ULL;
 
 #ifdef CONFIG_IAS_SMART_LOAD_TRACKING
-DEFINE_STATIC_KEY_FALSE(sched_load_tracking_aware_enable);
-static void set_load_tracking_aware(bool enabled)
-{
-	if (enabled)
-		static_branch_enable(&sched_load_tracking_aware_enable);
-	else
-		static_branch_disable(&sched_load_tracking_aware_enable);
-}
+#define LATENCY_MIN 10
+#define LATENCY_MAX 30
+unsigned int sysctl_load_tracking_latency = LATENCY_MIN;
 
-int sysctl_update_load_tracking_aware(struct ctl_table *table, int write,
-		void __user *buffer, size_t *lenp, loff_t *ppos)
+int sysctl_update_load_latency(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp, loff_t *ppos)
 {
+	int ret;
+	int min = LATENCY_MIN;
+	int max = LATENCY_MAX;
+	int latency = sysctl_load_tracking_latency;
 	struct ctl_table t;
-	int err;
-	int state = static_branch_likely(&sched_load_tracking_aware_enable);
 
 	if (write && !capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
 	t = *table;
-	t.data = &state;
-	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
-	if (err < 0)
-		return err;
+	t.data = &latency;
+	t.extra1 = &min;
+	t.extra2 = &max;
 
-	if (write)
-		set_load_tracking_aware(state);
+	ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
+	if (ret || !write)
+		return ret;
 
-	return err;
+	sysctl_load_tracking_latency = latency;
+
+	return 0;
 }
 #endif
@@ -3844,42 +3843,39 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 #define SKIP_AGE_LOAD	0x2
 #define DO_ATTACH	0x4
 
-#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
-/*
- * Check load tracking senario. In single-core system without cpu frequency update,
- * precise load tracking will be unnecessary. So here we just shutdown load tracking,
- * for decreasing cpu usage.
- */
-static inline int check_load_switch(void)
-{
-	if (static_branch_unlikely(&sched_load_tracking_aware_enable))
-		if (num_online_cpus() == 1)
-			/* no need to update load average in single core senario */
-			return 1;
-
-	return 0;
-}
-#endif
-
 /* Update task and its cfs_rq load average */
 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
 	u64 now = cfs_rq_clock_pelt(cfs_rq);
 	int decayed;
-
 #ifdef CONFIG_IAS_SMART_LOAD_TRACKING
-	if (check_load_switch())
-		return;
+	u64 delta;
 #endif
+
 	/*
 	 * Track task load average for carrying it to new CPU after migrated, and
 	 * track group sched_entity load average for task_h_load calc in migration
 	 */
+#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
+	delta = now - se->avg.last_update_time;
+	delta >>= sysctl_load_tracking_latency;
+
+	if (!delta)
+		return;
+
+	if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
+		__update_load_avg_se(now, cfs_rq, se);
+
+	decayed  = update_cfs_rq_load_avg(now, cfs_rq);
+	decayed |= propagate_entity_load_avg(se);
+#else
 	if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
 		__update_load_avg_se(now, cfs_rq, se);
 
 	decayed  = update_cfs_rq_load_avg(now, cfs_rq);
 	decayed |= propagate_entity_load_avg(se);
+#endif
+
 	if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 9b0fde36de8c13616513ce91f25c552c046e96dd..9bec60bd9fa2eade0686a6fb3c338cdb65c10ae5 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1691,13 +1691,11 @@ static struct ctl_table ias_table[] = {
 		.extra2		= SYSCTL_ONE,
 	},
 	{
-		.procname	= "sched_load_tracking_aware_enable",
+		.procname	= "sched_load_tracking_latency",
 		.data		= NULL,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= sysctl_update_load_tracking_aware,
-		.extra1		= SYSCTL_ZERO,
-		.extra2		= SYSCTL_ONE,
+		.proc_handler	= sysctl_update_load_latency,
 	},
 #endif
 	{ }
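
Editor's note on the new knob: update_load_avg() now skips a PELT update until at least 2^sysctl_load_tracking_latency nanoseconds have elapsed since se->avg.last_update_time, and the sysctl handler clamps the exponent to [LATENCY_MIN, LATENCY_MAX] = [10, 30], i.e. an effective update period between roughly 1 us and 1.07 s. The standalone userspace sketch below replays that shift-based gate; the should_update_load() helper, the sample timestamps, and the harness itself are illustrative assumptions, while the constants and the shift mirror the hunks above.

/* gate_sketch.c - hypothetical userspace replay of the patch's gating math */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LATENCY_MIN 10	/* 2^10 ns ~= 1.02 us minimum update period */
#define LATENCY_MAX 30	/* 2^30 ns ~= 1.07 s maximum update period */

static unsigned int sysctl_load_tracking_latency = LATENCY_MIN;

/* Mirrors the added check: right-shift the elapsed ns; zero means "too soon". */
static bool should_update_load(uint64_t now_ns, uint64_t last_update_time_ns)
{
	uint64_t delta = now_ns - last_update_time_ns;

	delta >>= sysctl_load_tracking_latency;
	return delta != 0;
}

int main(void)
{
	uint64_t last = 1000000000ULL;	/* arbitrary last_update_time, in ns */
	unsigned int lat;

	for (lat = LATENCY_MIN; lat <= LATENCY_MAX; lat += 10) {
		sysctl_load_tracking_latency = lat;
		printf("latency=%u: +500ns -> %s, +2ms -> %s\n", lat,
		       should_update_load(last + 500, last) ? "update" : "skip",
		       should_update_load(last + 2000000, last) ? "update" : "skip");
	}
	return 0;
}

One behavioral consequence visible in the hunk itself: when the gate trips, update_load_avg() returns before the DO_ATTACH handling, so a coarse exponent also defers entity attachment, trading PELT freshness for fewer updates.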