Commit b42e0c41 authored by Mike Galbraith, committed by Ingo Molnar

sched: Remove avg_wakeup

Testing the load which led to this heuristic (nfs4 kbuild) shows that it has
outlived its usefulness.  With intervening load-balancing changes, I cannot
see any difference with or without it, so recover those fastpath cycles.
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1268301062.6785.29.camel@marge.simson.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent 39c0cbe2
@@ -1185,9 +1185,6 @@ struct sched_entity {
 	u64			nr_migrations;
 
-	u64			start_runtime;
-	u64			avg_wakeup;
-
 #ifdef CONFIG_SCHEDSTATS
 	struct sched_statistics statistics;
 #endif
......
@@ -1880,9 +1880,6 @@ static void update_avg(u64 *avg, u64 sample)
 static void
 enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 {
-	if (wakeup)
-		p->se.start_runtime = p->se.sum_exec_runtime;
-
 	sched_info_queued(p);
 	p->sched_class->enqueue_task(rq, p, wakeup, head);
 	p->se.on_rq = 1;
@@ -1890,17 +1887,11 @@ enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
 {
-	if (sleep) {
-		if (p->se.last_wakeup) {
-			update_avg(&p->se.avg_overlap,
-				p->se.sum_exec_runtime - p->se.last_wakeup);
-			p->se.last_wakeup = 0;
-		} else {
-			update_avg(&p->se.avg_wakeup,
-				sysctl_sched_wakeup_granularity);
-		}
+	if (sleep && p->se.last_wakeup) {
+		update_avg(&p->se.avg_overlap,
+			p->se.sum_exec_runtime - p->se.last_wakeup);
+		p->se.last_wakeup = 0;
 	}
 	sched_info_dequeued(p);
 	p->sched_class->dequeue_task(rq, p, sleep);
 	p->se.on_rq = 0;
@@ -2466,13 +2457,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	 */
 	if (!in_interrupt()) {
 		struct sched_entity *se = &current->se;
-		u64 sample = se->sum_exec_runtime;
-
-		if (se->last_wakeup)
-			sample -= se->last_wakeup;
-		else
-			sample -= se->start_runtime;
-
-		update_avg(&se->avg_wakeup, sample);
 		se->last_wakeup = se->sum_exec_runtime;
 	}
@@ -2540,8 +2524,6 @@ static void __sched_fork(struct task_struct *p)
 	p->se.nr_migrations		= 0;
 	p->se.last_wakeup		= 0;
 	p->se.avg_overlap		= 0;
-	p->se.start_runtime		= 0;
-	p->se.avg_wakeup		= sysctl_sched_wakeup_granularity;
 
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
......
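Note (editorial): the dequeue path kept above still feeds avg_overlap through update_avg(). As rough orientation only, the sketch below restates that kind of running-average bookkeeping as a standalone userspace program; the exponential-moving-average step and the sample values are illustrative assumptions, not code taken from this commit.

/* Illustrative sketch only: a userspace restatement of the avg_overlap
 * bookkeeping retained above. The averaging step and the sample values
 * are assumptions for illustration, not the kernel's exact code. */
#include <stdint.h>
#include <stdio.h>

static void update_avg(uint64_t *avg, uint64_t sample)
{
	int64_t diff = (int64_t)(sample - *avg);

	*avg += diff >> 3;	/* move 1/8 of the way toward the new sample */
}

int main(void)
{
	uint64_t avg_overlap = 0;
	uint64_t last_wakeup = 1000000;		/* ns: runtime when last woken  */
	uint64_t sum_exec_runtime = 1250000;	/* ns: runtime at dequeue/sleep */

	/* sample: how long the task ran beyond its wakeup before sleeping */
	update_avg(&avg_overlap, sum_exec_runtime - last_wakeup);
	printf("avg_overlap = %llu ns\n", (unsigned long long)avg_overlap);
	return 0;
}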
@@ -408,7 +408,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	PN(se.vruntime);
 	PN(se.sum_exec_runtime);
 	PN(se.avg_overlap);
-	PN(se.avg_wakeup);
 
 	nr_switches = p->nvcsw + p->nivcsw;
......
@@ -1592,42 +1592,11 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 }
 #endif /* CONFIG_SMP */
 
-/*
- * Adaptive granularity
- *
- * se->avg_wakeup gives the average time a task runs until it does a wakeup,
- * with the limit of wakeup_gran -- when it never does a wakeup.
- *
- * So the smaller avg_wakeup is the faster we want this task to preempt,
- * but we don't want to treat the preemptee unfairly and therefore allow it
- * to run for at least the amount of time we'd like to run.
- *
- * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
- *
- * NOTE: we use *nr_running to scale with load, this nicely matches the
- * degrading latency on load.
- */
-static unsigned long
-adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
-{
-	u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-	u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
-	u64 gran = 0;
-
-	if (this_run < expected_wakeup)
-		gran = expected_wakeup - this_run;
-
-	return min_t(s64, gran, sysctl_sched_wakeup_granularity);
-}
-
 static unsigned long
 wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
 {
 	unsigned long gran = sysctl_sched_wakeup_granularity;
 
-	if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
-		gran = adaptive_gran(curr, se);
-
 	/*
 	 * Since its curr running now, convert the gran from real-time
 	 * to virtual-time in his units.
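Note (editorial): the removed comment and adaptive_gran() above describe the heuristic this commit drops. Purely as a worked illustration of that arithmetic, the standalone sketch below re-derives the clipped granularity with made-up example values; the granularity constant and the numbers are assumptions, not taken from the source.

/* Userspace restatement of the removed adaptive_gran() clamping logic.
 * The default granularity and the example values are illustrative only. */
#include <stdint.h>
#include <stdio.h>

#define WAKEUP_GRAN_NS	1000000ULL	/* assumed wakeup granularity: 1 ms */

static uint64_t adaptive_gran(uint64_t this_run, uint64_t avg_wakeup,
			      unsigned long nr_running)
{
	/* 2*avg_wakeup scaled by nr_running, as in the removed comment */
	uint64_t expected_wakeup = 2 * avg_wakeup * nr_running;
	uint64_t gran = 0;

	if (this_run < expected_wakeup)
		gran = expected_wakeup - this_run;

	/* clip to [0, wakeup granularity], as the removed code did via min_t() */
	if (gran > WAKEUP_GRAN_NS)
		gran = WAKEUP_GRAN_NS;

	return gran;
}

int main(void)
{
	/* A task with a small avg_wakeup (it wakes others quickly) gets a
	 * granularity below the default, so it may preempt sooner. After this
	 * commit, wakeup_gran() always starts from the full sysctl value. */
	printf("gran = %llu ns\n",
	       (unsigned long long)adaptive_gran(50000, 100000, 2));
	return 0;
}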
@@ -30,12 +30,6 @@ SCHED_FEAT(START_DEBIT, 1)
  */
 SCHED_FEAT(WAKEUP_PREEMPT, 1)
 
-/*
- * Compute wakeup_gran based on task behaviour, clipped to
- * [0, sched_wakeup_gran_ns]
- */
-SCHED_FEAT(ADAPTIVE_GRAN, 1)
-
 /*
  * When converting the wakeup granularity to virtual time, do it such
  * that heavier tasks preempting a lighter task have an edge.
......