Commit ce0dbbbb authored by Clark Williams, committed by Ingo Molnar

sched/rt: Add a tuning knob to allow changing SCHED_RR timeslice

Add a /proc/sys/kernel scheduler knob named
sched_rr_timeslice_ms that allows global changing of the
SCHED_RR timeslice value. The user-visible value is in milliseconds,
but it is stored internally as jiffies. Setting it to 0 (zero) resets it to the
default (currently 100ms).
Signed-off-by: Clark Williams <williams@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20130207094704.13751796@riff.lan
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent cf4aebc2
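To make the description above concrete, here is a small standalone model (an editorial illustration, not kernel code; the HZ value, the round-up approximation of msecs_to_jiffies(), and the sample inputs are assumptions) of how a value written to the knob ends up as a jiffies count, with zero restoring the default:

/*
 * Editorial illustration: models how sched_rr_handler() in the diff below
 * turns a millisecond value written to the knob into the jiffies value used
 * by the scheduler. HZ and the round-up conversion are assumptions that
 * approximate msecs_to_jiffies() for tick rates that divide 1000.
 */
#include <stdio.h>

#define HZ              1000                    /* assumed tick rate */
#define RR_TIMESLICE    (100 * HZ / 1000)       /* default: 100 ms, in jiffies */

static int rr_timeslice_jiffies(int written_ms)
{
        if (written_ms <= 0)                    /* writing 0 restores the default */
                return RR_TIMESLICE;
        return (written_ms * HZ + 999) / 1000;  /* ms -> jiffies, rounded up */
}

int main(void)
{
        const int samples[] = { 0, 1, 50, 100, 250 };   /* sample writes, in ms */
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("write %3d ms -> timeslice of %3d jiffies\n",
                       samples[i], rr_timeslice_jiffies(samples[i]));
        return 0;
}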
@@ -73,6 +73,13 @@ static inline unsigned int get_sysctl_timer_migration(void)
 	return 1;
 }
 #endif
+
+/*
+ * control realtime throttling:
+ *
+ * /proc/sys/kernel/sched_rt_period_us
+ * /proc/sys/kernel/sched_rt_runtime_us
+ */
 extern unsigned int sysctl_sched_rt_period;
 extern int sysctl_sched_rt_runtime;
@@ -90,7 +97,13 @@ extern unsigned int sysctl_sched_autogroup_enabled;
  */
 #define RR_TIMESLICE	(100 * HZ / 1000)
 
-int sched_rt_handler(struct ctl_table *table, int write,
+extern int sched_rr_timeslice;
+
+extern int sched_rr_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos);
+
+extern int sched_rt_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp,
 		loff_t *ppos);
@@ -7509,6 +7509,25 @@ static int sched_rt_global_constraints(void)
 }
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+int sched_rr_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos)
+{
+	int ret;
+	static DEFINE_MUTEX(mutex);
+
+	mutex_lock(&mutex);
+	ret = proc_dointvec(table, write, buffer, lenp, ppos);
+	/* make sure that internally we keep jiffies */
+	/* also, writing zero resets timeslice to default */
+	if (!ret && write) {
+		sched_rr_timeslice = sched_rr_timeslice <= 0 ?
+			RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
+	}
+	mutex_unlock(&mutex);
+	return ret;
+}
+
 int sched_rt_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp,
 		loff_t *ppos)
@@ -7,6 +7,8 @@
 
 #include <linux/slab.h>
 
+int sched_rr_timeslice = RR_TIMESLICE;
+
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
 
 struct rt_bandwidth def_rt_bandwidth;
@@ -2016,7 +2018,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 	if (--p->rt.time_slice)
 		return;
 
-	p->rt.time_slice = RR_TIMESLICE;
+	p->rt.time_slice = sched_rr_timeslice;
 
 	/*
 	 * Requeue to the end of queue if we (and all of our ancestors) are the
@@ -2047,7 +2049,7 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
 	 * Time slice is 0 for SCHED_FIFO tasks
 	 */
 	if (task->policy == SCHED_RR)
-		return RR_TIMESLICE;
+		return sched_rr_timeslice;
 	else
 		return 0;
 }
@@ -404,6 +404,13 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= sched_rt_handler,
 	},
+	{
+		.procname	= "sched_rr_timeslice_ms",
+		.data		= &sched_rr_timeslice,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= sched_rr_handler,
+	},
 #ifdef CONFIG_SCHED_AUTOGROUP
 	{
 		.procname	= "sched_autogroup_enabled",
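As a usage illustration (not part of the commit), the sketch below pokes the new knob from userspace; the helper names and the 50 ms sample value are invented for the example, and writing requires root. Note that, as the handler above suggests, proc_dointvec() prints the stored variable on reads, so the value read back is in jiffies even though writes are interpreted as milliseconds:

/*
 * Illustrative sketch only: assumes a kernel carrying this patch and root
 * privileges for the writes. Helper names and the 50 ms sample value are
 * made up for the example.
 */
#include <stdio.h>

#define RR_TIMESLICE_PROC "/proc/sys/kernel/sched_rr_timeslice_ms"

static int read_knob(void)
{
        FILE *f = fopen(RR_TIMESLICE_PROC, "r");
        int val = -1;

        if (f) {
                if (fscanf(f, "%d", &val) != 1)
                        val = -1;
                fclose(f);
        }
        return val;
}

static int write_knob(int ms)
{
        FILE *f = fopen(RR_TIMESLICE_PROC, "w");

        if (!f)
                return -1;
        fprintf(f, "%d\n", ms);
        return fclose(f);
}

int main(void)
{
        printf("stored SCHED_RR timeslice: %d (jiffies)\n", read_knob());

        if (write_knob(50) == 0)        /* request a 50 ms timeslice */
                printf("after write: %d (jiffies)\n", read_knob());

        write_knob(0);                  /* 0 resets to the 100 ms default */
        return 0;
}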