Commit 189fa7a4 authored by Zheng Zucheng, committed by Zheng Zengkai

sysctl: Refactor IAS framework

hulk inclusion
category: feature
bugzilla: 177206 https://gitee.com/openeuler/kernel/issues/I4DDEL

--------------------------------

Refactor intelligent aware scheduler framework
Signed-off-by: Zheng Zucheng <zhengzucheng@huawei.com>
Reviewed-by: Chen Hui <judy.chenhui@huawei.com>
Signed-off-by: Chen Jun <chenjun102@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent f4f98f3b
......@@ -555,7 +555,7 @@ extern int sysctl_panic_on_rcu_stall;
extern int sysctl_panic_on_stackoverflow;
extern bool crash_kexec_post_notifiers;
#ifdef CONFIG_IAS_SMART_HALT_POLL
#ifdef CONFIG_IAS_SMART_IDLE
extern unsigned long poll_threshold_ns;
#endif
......
......@@ -98,7 +98,7 @@ int sched_energy_aware_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
#endif
#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
extern int sysctl_blocked_averages(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
extern int sysctl_tick_update_load(struct ctl_table *table, int write,
......
......@@ -774,22 +774,6 @@ config GENERIC_SCHED_CLOCK
menu "Scheduler features"
config SCHED_OPTIMIZE_LOAD_TRACKING
bool "Optimize scheduler load tracking"
default n
help
Optimize scheduler load tracking. When load balancing is not important
in the system, we disable some load tracking in the tick and on task
enqueue/dequeue; in this way we can save some unnecessary CPU overhead.
config IAS_SMART_HALT_POLL
bool "Enable smart halt poll"
default n
help
Before entering real idle, poll for a while. If the current task is
set TIF_NEED_RESCHED during the polling, it breaks out of the polling
loop immediately.
config UCLAMP_TASK
bool "Enable utilization clamping for RT/FAIR tasks"
depends on CPU_FREQ_GOV_SCHEDUTIL
......@@ -839,6 +823,26 @@ config UCLAMP_BUCKETS_COUNT
If in doubt, use the default value.
menu "Intelligent aware scheduler"
config IAS_SMART_IDLE
bool "Enable smart idle"
default n
help
Before entering real idle, poll for a while. If the current task is
set TIF_NEED_RESCHED during the polling, it breaks out of the polling
loop immediately.
config IAS_SMART_LOAD_TRACKING
bool "Enable smart load tracking"
default n
help
Optimize scheduler load tracking. When load balancing is not important
in the system, we disable some load tracking in the tick and on task
enqueue/dequeue; in this way we can save some unnecessary CPU overhead.
endmenu
endmenu
#
......
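A minimal sketch of the IAS_SMART_IDLE behaviour described in the help text above, assuming the poll_threshold_ns knob added by this series. The real function body is truncated in the idle.c hunks below, so this is illustrative rather than the literal patch code:

/*
 * Sketch only: spin for at most poll_threshold_ns nanoseconds before
 * entering real idle, and bail out as soon as the current task has
 * TIF_NEED_RESCHED set.
 */
static void smart_idle_poll(void)
{
        unsigned long poll_duration = poll_threshold_ns;
        ktime_t cur, stop;

        if (!poll_duration)
                return;

        stop = ktime_add_ns(ktime_get(), poll_duration);

        do {
                cpu_relax();
                if (need_resched())
                        break;
                cur = ktime_get();
        } while (ktime_before(cur, stop));
}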
......@@ -38,7 +38,7 @@
unsigned int sysctl_sched_latency = 6000000ULL;
static unsigned int normalized_sysctl_sched_latency = 6000000ULL;
#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
#define LANTENCY_MIN 10
#define LANTENCY_MAX 30
unsigned int sysctl_load_tracking_latency = LANTENCY_MIN;
......@@ -3837,7 +3837,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
{
u64 now = cfs_rq_clock_pelt(cfs_rq);
int decayed;
#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
u64 delta;
#endif
......@@ -3845,7 +3845,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
* Track task load average for carrying it to new CPU after migrated, and
* track group sched_entity load average for task_h_load calc in migration
*/
#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
delta = now - se->avg.last_update_time;
delta >>= sysctl_load_tracking_latency;
......@@ -4601,7 +4601,7 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
cfs_rq->curr = NULL;
}
#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
DEFINE_STATIC_KEY_TRUE(sched_tick_update_load);
static void set_tick_update_load(bool enabled)
{
......@@ -4644,7 +4644,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
/*
* Ensure that runnable average is periodically updated.
*/
#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
if (static_branch_likely(&sched_tick_update_load)) {
update_load_avg(cfs_rq, curr, UPDATE_TG);
update_cfs_group(curr);
......@@ -8090,7 +8090,7 @@ static void attach_tasks(struct lb_env *env)
rq_unlock(env->dst_rq, &rf);
}
#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
DEFINE_STATIC_KEY_TRUE(sched_blocked_averages);
static void set_blocked_averages(bool enabled)
......@@ -8326,7 +8326,7 @@ static void update_blocked_averages(int cpu)
rq_lock_irqsave(rq, &rf);
update_rq_clock(rq);
#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
if (!static_branch_likely(&sched_blocked_averages)) {
rq_unlock_irqrestore(rq, &rf);
return;
......
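The hunks above gate blocked-average updates and tick-time load updates behind the static keys sched_blocked_averages and sched_tick_update_load, but the sysctl handlers that flip them (sysctl_blocked_averages(), sysctl_tick_update_load()) are truncated in this view. A plausible sketch of that handler pattern, assuming a plain 0/1 toggle bounded by the SYSCTL_ZERO/SYSCTL_ONE limits set in the table further down, not the literal patch code:

/*
 * Sketch of the toggle pattern (the real handler bodies are elided
 * above): read or write a 0/1 value and flip the static key so the
 * fair.c fast paths stay branch-free while tracking is enabled.
 */
static void set_blocked_averages(bool enabled)
{
        if (enabled)
                static_branch_enable(&sched_blocked_averages);
        else
                static_branch_disable(&sched_blocked_averages);
}

int sysctl_blocked_averages(struct ctl_table *table, int write,
                            void __user *buffer, size_t *lenp, loff_t *ppos)
{
        unsigned int enabled = static_branch_likely(&sched_blocked_averages);
        struct ctl_table t = *table;
        int err;

        t.data = &enabled;
        err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
        if (!err && write)
                set_blocked_averages(enabled);

        return err;
}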
......@@ -13,7 +13,7 @@
/* Linker adds these: start and end of __cpuidle functions */
extern char __cpuidle_text_start[], __cpuidle_text_end[];
#ifdef CONFIG_IAS_SMART_HALT_POLL
#ifdef CONFIG_IAS_SMART_IDLE
/*
* Poll_threshold_ns indicates the maximum polling time before
* entering real idle.
......@@ -60,7 +60,7 @@ static int __init cpu_idle_nopoll_setup(char *__unused)
__setup("hlt", cpu_idle_nopoll_setup);
#endif
#ifdef CONFIG_IAS_SMART_HALT_POLL
#ifdef CONFIG_IAS_SMART_IDLE
static void smart_idle_poll(void)
{
unsigned long poll_duration = poll_threshold_ns;
......@@ -86,7 +86,7 @@ static noinline int __cpuidle cpu_idle_poll(void)
stop_critical_timings();
rcu_idle_enter();
local_irq_enable();
#ifdef CONFIG_IAS_SMART_HALT_POLL
#ifdef CONFIG_IAS_SMART_IDLE
smart_idle_poll();
#endif
......@@ -292,7 +292,7 @@ static void cpuidle_idle_call(void)
static void do_idle(void)
{
int cpu = smp_processor_id();
#ifdef CONFIG_IAS_SMART_HALT_POLL
#ifdef CONFIG_IAS_SMART_IDLE
unsigned long idle_poll_flag = poll_threshold_ns;
#endif
/*
......@@ -327,7 +327,7 @@ static void do_idle(void)
* broadcast device expired for us, we don't want to go deep
* idle as we know that the IPI is going to arrive right away.
*/
#ifdef CONFIG_IAS_SMART_HALT_POLL
#ifdef CONFIG_IAS_SMART_IDLE
if (cpu_idle_force_poll || tick_check_broadcast_expired() ||
idle_poll_flag) {
#else
......@@ -335,7 +335,7 @@ static void do_idle(void)
#endif
tick_nohz_idle_restart_tick();
cpu_idle_poll();
#ifdef CONFIG_IAS_SMART_HALT_POLL
#ifdef CONFIG_IAS_SMART_IDLE
idle_poll_flag = 0;
#endif
} else {
......
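A note on the do_idle() change above: idle_poll_flag is seeded from poll_threshold_ns and cleared right after the first cpu_idle_poll() pass, so each idle entry appears to get at most one smart-poll attempt (unless cpu_idle_force_poll or a pending broadcast keeps the poll branch active) before the loop falls through to cpuidle_idle_call().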
......@@ -1650,6 +1650,46 @@ int proc_do_static_key(struct ctl_table *table, int write,
mutex_unlock(&static_key_mutex);
return ret;
}
static struct ctl_table ias_table[] = {
#ifdef CONFIG_IAS_SMART_IDLE
{
.procname = "smart_idle_threshold",
.data = &poll_threshold_ns,
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
},
#endif
#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
{
.procname = "sched_blocked_averages",
.data = NULL,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = sysctl_blocked_averages,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{
.procname = "sched_tick_update_load",
.data = NULL,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = sysctl_tick_update_load,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{
.procname = "sched_load_tracking_latency",
.data = NULL,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = sysctl_update_load_latency,
},
#endif
{ }
};
static struct ctl_table kern_table[] = {
{
......@@ -1764,33 +1804,6 @@ static struct ctl_table kern_table[] = {
},
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_SCHED_DEBUG */
#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
{
.procname = "sched_blocked_averages",
.data = NULL,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = sysctl_blocked_averages,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{
.procname = "sched_tick_update_load",
.data = NULL,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = sysctl_tick_update_load,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{
.procname = "sched_load_tracking_latency",
.data = NULL,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = sysctl_update_load_latency,
},
#endif
{
.procname = "sched_rt_period_us",
.data = &sysctl_sched_rt_period,
......@@ -1849,15 +1862,7 @@ static struct ctl_table kern_table[] = {
.proc_handler = sysctl_sched_uclamp_handler,
},
#endif
#ifdef CONFIG_IAS_SMART_HALT_POLL
{
.procname = "halt_poll_threshold",
.data = &poll_threshold_ns,
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
},
#endif
#ifdef CONFIG_SCHED_AUTOGROUP
{
.procname = "sched_autogroup_enabled",
......@@ -2697,6 +2702,11 @@ static struct ctl_table kern_table[] = {
.extra2 = SYSCTL_ONE,
},
#endif
{
.procname = "ias",
.mode = 0555,
.child = ias_table,
},
{ }
};
......
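With ias_table registered as a child directory named "ias" under kern_table, the knobs should now surface as /proc/sys/kernel/ias/smart_idle_threshold, /proc/sys/kernel/ias/sched_blocked_averages, /proc/sys/kernel/ias/sched_tick_update_load and /proc/sys/kernel/ias/sched_load_tracking_latency, replacing the top-level kernel.halt_poll_threshold, kernel.sched_blocked_averages, kernel.sched_tick_update_load and kernel.sched_load_tracking_latency entries removed from kern_table above.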