commit 103638d9 authored by Peter Zijlstra, committed by Ingo Molnar

sched: fix wakeup granularity and buddy granularity

Uncouple buddy selection from wakeup granularity.

The initial idea was that buddies could run ahead as far as a normal task
can - do this by measuring a pair 'slice' just as we do for a normal task.

This means we can drop the wakeup_granularity back to 5ms.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 76a2a6ee
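The message says the default wake-up granularity can be dropped back to 5 ms; a quick way to see the value that actually takes effect on a running system is to read it back from procfs. This is only a sketch: the sysctl path /proc/sys/kernel/sched_wakeup_granularity_ns, and the assumption that it reports the already CPU-scaled value, are not part of this patch.

#include <stdio.h>

int main(void)
{
	/* Assumed sysctl path for the tunable changed by this patch. */
	const char *path = "/proc/sys/kernel/sched_wakeup_granularity_ns";
	unsigned long long ns;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%llu", &ns) != 1) {
		fclose(f);
		fprintf(stderr, "unexpected format in %s\n", path);
		return 1;
	}
	fclose(f);

	/* Value is in nanoseconds, e.g. 5000000 for the new 5 msec default. */
	printf("wakeup granularity: %llu ns (%.1f ms)\n", ns, ns / 1e6);
	return 0;
}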
@@ -375,6 +375,7 @@ struct cfs_rq {
 
 	u64 exec_clock;
 	u64 min_vruntime;
+	u64 pair_start;
 
 	struct rb_root tasks_timeline;
 	struct rb_node *rb_leftmost;
@@ -63,13 +63,13 @@ unsigned int __read_mostly sysctl_sched_compat_yield;
 
 /*
  * SCHED_OTHER wake-up granularity.
- * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ * (default: 5 msec * (1 + ilog(ncpus)), units: nanoseconds)
  *
  * This option delays the preemption effects of decoupled workloads
  * and reduces their over-scheduling. Synchronous workloads will still
  * have immediate wakeup/sleep latencies.
  */
-unsigned int sysctl_sched_wakeup_granularity = 10000000UL;
+unsigned int sysctl_sched_wakeup_granularity = 5000000UL;
 
 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
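The "5 msec * (1 + ilog(ncpus))" wording in the comment is easy to gloss over, so here is the arithmetic spelled out. This is an illustration only: the helper ilog2_u() below is a stand-in for the kernel's ilog2()-based CPU-count scaling, not code from this patch.

#include <stdio.h>

/* Stand-in for the kernel's ilog2(): floor(log2(x)) for x >= 1. */
static unsigned int ilog2_u(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

int main(void)
{
	const unsigned int base_ns = 5000000;	/* new 5 msec default */

	for (unsigned int ncpus = 1; ncpus <= 16; ncpus *= 2) {
		unsigned int factor = 1 + ilog2_u(ncpus);

		/* e.g. 8 CPUs: factor = 1 + 3 = 4, so 20 msec. */
		printf("%2u cpus -> %u ns (%u ms)\n",
		       ncpus, base_ns * factor, base_ns * factor / 1000000u);
	}
	return 0;
}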
@@ -813,17 +813,16 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	se->prev_sum_exec_runtime = se->sum_exec_runtime;
 }
 
-static int
-wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
-
 static struct sched_entity *
 pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	if (!cfs_rq->next)
-		return se;
+	struct rq *rq = rq_of(cfs_rq);
+	u64 pair_slice = rq->clock - cfs_rq->pair_start;
 
-	if (wakeup_preempt_entity(cfs_rq->next, se) != 0)
+	if (!cfs_rq->next || pair_slice > sched_slice(cfs_rq, cfs_rq->next)) {
+		cfs_rq->pair_start = rq->clock;
 		return se;
+	}
 
 	return cfs_rq->next;
 }
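The hunk above is the heart of the change, so here is the resulting pick_next() restated outside the diff with comments. All identifiers (rq_of(), rq->clock, cfs_rq->next, cfs_rq->pair_start, sched_slice()) come from the patch itself; the comments are editorial and only sketch the intent.

/* Post-patch pick_next(), annotated (comments are editorial). */
static struct sched_entity *
pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rq *rq = rq_of(cfs_rq);

	/* Time the current buddy pair has been allowed to run ahead. */
	u64 pair_slice = rq->clock - cfs_rq->pair_start;

	/*
	 * No buddy set, or the buddy has already used up a full slice of
	 * its own: pick the leftmost entity (se) and restart the pair
	 * clock, instead of gating the buddy on the wakeup granularity
	 * as the old code did via wakeup_preempt_entity().
	 */
	if (!cfs_rq->next || pair_slice > sched_slice(cfs_rq, cfs_rq->next)) {
		cfs_rq->pair_start = rq->clock;
		return se;
	}

	/* Buddy still within its slice: keep running it. */
	return cfs_rq->next;
}

This is what "buddies could run ahead as far as a normal task can" means in the commit message: the buddy is bounded by its own sched_slice() rather than by sysctl_sched_wakeup_granularity.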