Commit 63fd54e3 authored by Yu Jiahua, committed by Zheng Zengkai

Revert "sysctl: Refactor IAS framework"

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4QU5Z?from=project-issue
CVE: NA

--------------------------------

This patch reverts the IAS feature from the openEuler kernel.

This reverts commit 189fa7a4.
Signed-off-by: Yu Jiahua <Yujiahua1@huawei.com>
Reviewed-by: Chen Hui <judy.chenhui@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent d9244bc3
@@ -557,7 +557,7 @@ extern int sysctl_panic_on_rcu_stall;
 extern int sysctl_panic_on_stackoverflow;
 extern bool crash_kexec_post_notifiers;
-#ifdef CONFIG_IAS_SMART_IDLE
+#ifdef CONFIG_IAS_SMART_HALT_POLL
 extern unsigned long poll_threshold_ns;
 #endif
...
@@ -103,7 +103,7 @@ int sched_energy_aware_handler(struct ctl_table *table, int write,
 		void *buffer, size_t *lenp, loff_t *ppos);
 #endif
-#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
+#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
 extern int sysctl_blocked_averages(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp, loff_t *ppos);
 extern int sysctl_tick_update_load(struct ctl_table *table, int write,
...
@@ -774,6 +774,22 @@ config GENERIC_SCHED_CLOCK
 menu "Scheduler features"
 
+config SCHED_OPTIMIZE_LOAD_TRACKING
+	bool "Optimize scheduler load tracking"
+	default n
+	help
+	  Optimize scheduler load tracking. When load balance is not
+	  important in the system, skip some load-tracking work in the tick
+	  and on task enqueue/dequeue to save unnecessary CPU overhead.
+
+config IAS_SMART_HALT_POLL
+	bool "Enable smart halt poll"
+	default n
+	help
+	  Before entering real idle, poll for a while. If the current task
+	  is set TIF_NEED_RESCHED during the polling process, break out of
+	  the polling loop immediately.
+
 config UCLAMP_TASK
 	bool "Enable utilization clamping for RT/FAIR tasks"
 	depends on CPU_FREQ_GOV_SCHEDUTIL
@@ -823,26 +839,6 @@ config UCLAMP_BUCKETS_COUNT
 	  If in doubt, use the default value.
 
-menu "Intelligent aware scheduler"
-
-config IAS_SMART_IDLE
-	bool "Enable smart idle"
-	default n
-	help
-	  Before entering real idle, poll for a while. If the current task
-	  is set TIF_NEED_RESCHED during the polling process, break out of
-	  the polling loop immediately.
-
-config IAS_SMART_LOAD_TRACKING
-	bool "Enable smart load tracking"
-	default n
-	help
-	  Optimize scheduler load tracking. When load balance is not
-	  important in the system, skip some load-tracking work in the tick
-	  and on task enqueue/dequeue to save unnecessary CPU overhead.
-
-endmenu
-
 endmenu
 #
...
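The SCHED_OPTIMIZE_LOAD_TRACKING help text above is terse, and the fair.c hunk below cuts off right after the shift, so here is a minimal sketch of the throttle as it plausibly reads in update_load_avg(); the early-return condition and the helper name load_update_due are assumptions for illustration, not code from this commit:

/*
 * Hypothetical condensed view of the CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
 * gate in update_load_avg(). sysctl_load_tracking_latency is a shift
 * count clamped to LANTENCY_MIN..LANTENCY_MAX (10..30): shifting the
 * elapsed nanoseconds right by it quantizes time, so intervals shorter
 * than 2^shift ns collapse to zero and the PELT update can be skipped.
 */
static inline bool load_update_due(u64 now, struct sched_entity *se)
{
	u64 delta = now - se->avg.last_update_time;

	delta >>= sysctl_load_tracking_latency;
	return delta != 0;	/* assumed: zero means "updated recently enough" */
}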
@@ -44,7 +44,7 @@
 unsigned int sysctl_sched_latency = 6000000ULL;
 static unsigned int normalized_sysctl_sched_latency = 6000000ULL;
-#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
+#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
 #define LANTENCY_MIN 10
 #define LANTENCY_MAX 30
 unsigned int sysctl_load_tracking_latency = LANTENCY_MIN;
@@ -3848,7 +3848,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 {
 	u64 now = cfs_rq_clock_pelt(cfs_rq);
 	int decayed;
-#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
+#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
 	u64 delta;
 #endif
@@ -3856,7 +3856,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 	 * Track task load average for carrying it to new CPU after migrated, and
 	 * track group sched_entity load average for task_h_load calc in migration
 	 */
-#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
+#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
 	delta = now - se->avg.last_update_time;
 	delta >>= sysctl_load_tracking_latency;
@@ -4681,7 +4681,7 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 	cfs_rq->curr = NULL;
 }
-#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
+#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
 DEFINE_STATIC_KEY_TRUE(sched_tick_update_load);
 static void set_tick_update_load(bool enabled)
 {
@@ -4724,7 +4724,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 	/*
 	 * Ensure that runnable average is periodically updated.
 	 */
-#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
+#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
 	if (static_branch_likely(&sched_tick_update_load)) {
 		update_load_avg(cfs_rq, curr, UPDATE_TG);
 		update_cfs_group(curr);
@@ -8339,7 +8339,7 @@ static void attach_tasks(struct lb_env *env)
 	rq_unlock(env->dst_rq, &rf);
 }
-#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
+#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
 DEFINE_STATIC_KEY_TRUE(sched_blocked_averages);
 static void set_blocked_averages(bool enabled)
@@ -8575,7 +8575,7 @@ static void update_blocked_averages(int cpu)
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
-#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
+#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
 	if (!static_branch_likely(&sched_blocked_averages)) {
 		rq_unlock_irqrestore(rq, &rf);
 		return;
...
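The bodies of set_tick_update_load() and the sysctl handlers are elided from the hunks above, so the following is only a sketch of their assumed shape: the handler parses an unsigned int, lets the core helper validate it against extra1/extra2 (0..1), and flips the static key. The signature follows the extern declaration earlier in this diff; none of this body is taken from the commit itself.

#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
/* Assumed shape of the elided toggle helper. */
static void set_tick_update_load(bool enabled)
{
	if (enabled)
		static_branch_enable(&sched_tick_update_load);
	else
		static_branch_disable(&sched_tick_update_load);
}

/* Assumed shape of the elided sysctl handler: snapshot the current
 * state, validate the write, then apply it to the static key. */
int sysctl_tick_update_load(struct ctl_table *table, int write,
			    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t = *table;
	unsigned int val = static_branch_likely(&sched_tick_update_load);
	int ret;

	t.data = &val;
	ret = proc_douintvec_minmax(&t, write, buffer, lenp, ppos);
	if (!ret && write)
		set_tick_update_load(val);
	return ret;
}
#endif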
@@ -13,7 +13,7 @@
 /* Linker adds these: start and end of __cpuidle functions */
 extern char __cpuidle_text_start[], __cpuidle_text_end[];
-#ifdef CONFIG_IAS_SMART_IDLE
+#ifdef CONFIG_IAS_SMART_HALT_POLL
 /*
  * Poll_threshold_ns indicates the maximum polling time before
  * entering real idle.
@@ -60,7 +60,7 @@ static int __init cpu_idle_nopoll_setup(char *__unused)
 __setup("hlt", cpu_idle_nopoll_setup);
 #endif
-#ifdef CONFIG_IAS_SMART_IDLE
+#ifdef CONFIG_IAS_SMART_HALT_POLL
 static void smart_idle_poll(void)
 {
 	unsigned long poll_duration = poll_threshold_ns;
@@ -86,7 +86,7 @@ static noinline int __cpuidle cpu_idle_poll(void)
 	stop_critical_timings();
 	rcu_idle_enter();
 	local_irq_enable();
-#ifdef CONFIG_IAS_SMART_IDLE
+#ifdef CONFIG_IAS_SMART_HALT_POLL
 	smart_idle_poll();
 #endif
@@ -292,7 +292,7 @@ static void cpuidle_idle_call(void)
 static void do_idle(void)
 {
 	int cpu = smp_processor_id();
-#ifdef CONFIG_IAS_SMART_IDLE
+#ifdef CONFIG_IAS_SMART_HALT_POLL
 	unsigned long idle_poll_flag = poll_threshold_ns;
 #endif
 	/*
@@ -327,7 +327,7 @@ static void do_idle(void)
 	 * broadcast device expired for us, we don't want to go deep
 	 * idle as we know that the IPI is going to arrive right away.
 	 */
-#ifdef CONFIG_IAS_SMART_IDLE
+#ifdef CONFIG_IAS_SMART_HALT_POLL
 	if (cpu_idle_force_poll || tick_check_broadcast_expired() ||
 	    idle_poll_flag) {
 #else
@@ -335,7 +335,7 @@ static void do_idle(void)
 #endif
 		tick_nohz_idle_restart_tick();
 		cpu_idle_poll();
-#ifdef CONFIG_IAS_SMART_IDLE
+#ifdef CONFIG_IAS_SMART_HALT_POLL
 		idle_poll_flag = 0;
 #endif
 	} else {
...
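Only the first lines of smart_idle_poll() survive in the idle.c hunk above. A plausible completion, assuming the loop the Kconfig help text describes (spin until poll_threshold_ns elapses or the current task is marked TIF_NEED_RESCHED); treat it as a sketch, not the commit's exact body:

#ifdef CONFIG_IAS_SMART_HALT_POLL
static void smart_idle_poll(void)
{
	unsigned long poll_duration = poll_threshold_ns;
	ktime_t cur, stop;

	if (!poll_duration)	/* threshold of 0 disables polling */
		return;

	stop = ktime_add_ns(ktime_get(), poll_duration);
	do {
		cpu_relax();	/* be polite to a sibling hardware thread */
		if (tif_need_resched())
			break;	/* work arrived: stop polling immediately */
		cur = ktime_get();
	} while (ktime_before(cur, stop));
}
#endif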
@@ -1659,46 +1659,6 @@ int proc_do_static_key(struct ctl_table *table, int write,
 	mutex_unlock(&static_key_mutex);
 	return ret;
 }
 
-static struct ctl_table ias_table[] = {
-#ifdef CONFIG_IAS_SMART_IDLE
-	{
-		.procname	= "smart_idle_threshold",
-		.data		= &poll_threshold_ns,
-		.maxlen		= sizeof(unsigned long),
-		.mode		= 0644,
-		.proc_handler	= proc_doulongvec_minmax,
-	},
-#endif
-#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
-	{
-		.procname	= "sched_blocked_averages",
-		.data		= NULL,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= sysctl_blocked_averages,
-		.extra1		= SYSCTL_ZERO,
-		.extra2		= SYSCTL_ONE,
-	},
-	{
-		.procname	= "sched_tick_update_load",
-		.data		= NULL,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= sysctl_tick_update_load,
-		.extra1		= SYSCTL_ZERO,
-		.extra2		= SYSCTL_ONE,
-	},
-	{
-		.procname	= "sched_load_tracking_latency",
-		.data		= NULL,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= sysctl_update_load_latency,
-	},
-#endif
-	{ }
-};
-
 static struct ctl_table kern_table[] = {
 	{
@@ -1813,6 +1773,33 @@ static struct ctl_table kern_table[] = {
 	},
 #endif /* CONFIG_NUMA_BALANCING */
 #endif /* CONFIG_SCHED_DEBUG */
+#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
+	{
+		.procname	= "sched_blocked_averages",
+		.data		= NULL,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sysctl_blocked_averages,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= SYSCTL_ONE,
+	},
+	{
+		.procname	= "sched_tick_update_load",
+		.data		= NULL,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sysctl_tick_update_load,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= SYSCTL_ONE,
+	},
+	{
+		.procname	= "sched_load_tracking_latency",
+		.data		= NULL,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sysctl_update_load_latency,
+	},
+#endif
 	{
 		.procname	= "sched_rt_period_us",
 		.data		= &sysctl_sched_rt_period,
@@ -1871,7 +1858,15 @@ static struct ctl_table kern_table[] = {
 		.proc_handler	= sysctl_sched_uclamp_handler,
 	},
 #endif
+#ifdef CONFIG_IAS_SMART_HALT_POLL
+	{
+		.procname	= "halt_poll_threshold",
+		.data		= &poll_threshold_ns,
+		.maxlen		= sizeof(unsigned long),
+		.mode		= 0644,
+		.proc_handler	= proc_doulongvec_minmax,
+	},
+#endif
 #ifdef CONFIG_SCHED_AUTOGROUP
 	{
 		.procname	= "sched_autogroup_enabled",
@@ -2713,11 +2708,6 @@ static struct ctl_table kern_table[] = {
 		.extra2		= SYSCTL_ONE,
 	},
 #endif
-	{
-		.procname	= "ias",
-		.mode		= 0555,
-		.child		= ias_table,
-	},
 #ifdef CONFIG_QOS_SCHED
 	{
 		.procname	= "qos_overload_detect_period_ms",
...
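After this revert the tunables live directly under /proc/sys/kernel/ rather than in the removed kernel.ias directory. A hypothetical userspace probe for the halt-poll knob (the value written here is an example only, and the file exists only on kernels built with CONFIG_IAS_SMART_HALT_POLL):

/* Sets the halt-poll threshold to 200 us; run as root. */
#include <stdio.h>

int main(void)
{
	const char *path = "/proc/sys/kernel/halt_poll_threshold";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);	/* likely: option not enabled in this kernel */
		return 1;
	}
	fprintf(f, "%lu\n", 200000UL);	/* threshold is in nanoseconds */
	fclose(f);
	return 0;
}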