Commit 2070ee01 authored by Peter Zijlstra, committed by Ingo Molnar

sched: clean up old and rarely used 'debug' features.

TREE_AVG and APPROX_AVG are initial task placement policies that have been
disabled for a long while; time to remove them.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
CC: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent ae51801b
@@ -594,18 +594,14 @@ enum {
         SCHED_FEAT_NEW_FAIR_SLEEPERS = 1,
         SCHED_FEAT_WAKEUP_PREEMPT    = 2,
         SCHED_FEAT_START_DEBIT       = 4,
-        SCHED_FEAT_TREE_AVG          = 8,
-        SCHED_FEAT_APPROX_AVG        = 16,
-        SCHED_FEAT_HRTICK            = 32,
-        SCHED_FEAT_DOUBLE_TICK       = 64,
+        SCHED_FEAT_HRTICK            = 8,
+        SCHED_FEAT_DOUBLE_TICK       = 16,
 };
 
 const_debug unsigned int sysctl_sched_features =
                 SCHED_FEAT_NEW_FAIR_SLEEPERS * 1 |
                 SCHED_FEAT_WAKEUP_PREEMPT    * 1 |
                 SCHED_FEAT_START_DEBIT       * 1 |
-                SCHED_FEAT_TREE_AVG          * 0 |
-                SCHED_FEAT_APPROX_AVG        * 0 |
                 SCHED_FEAT_HRTICK            * 1 |
                 SCHED_FEAT_DOUBLE_TICK       * 0;
 
...
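Because the feature flags are plain power-of-two bits, dropping TREE_AVG and APPROX_AVG also renumbers HRTICK and DOUBLE_TICK (32/64 become 8/16), and the value of the default mask built with the multiply-by-0/1 pattern changes with it. The following is a minimal user-space sketch of the same construction, purely illustrative and not kernel code, showing how the default mask evaluates after this change:

    #include <stdio.h>

    /* Same bit values as the post-patch enum in the hunk above. */
    enum {
            SCHED_FEAT_NEW_FAIR_SLEEPERS = 1,
            SCHED_FEAT_WAKEUP_PREEMPT    = 2,
            SCHED_FEAT_START_DEBIT       = 4,
            SCHED_FEAT_HRTICK            = 8,
            SCHED_FEAT_DOUBLE_TICK       = 16,
    };

    int main(void)
    {
            /* The '* 1' keeps a bit, the '* 0' drops it while leaving the
             * line in place, as in sysctl_sched_features. */
            unsigned int features =
                    SCHED_FEAT_NEW_FAIR_SLEEPERS * 1 |
                    SCHED_FEAT_WAKEUP_PREEMPT    * 1 |
                    SCHED_FEAT_START_DEBIT       * 1 |
                    SCHED_FEAT_HRTICK            * 1 |
                    SCHED_FEAT_DOUBLE_TICK       * 0;

            printf("default sched_features mask: %u\n", features); /* prints 15 */
            return 0;
    }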
@@ -302,11 +302,6 @@ static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
         return vslice;
 }
 
-static u64 sched_vslice(struct cfs_rq *cfs_rq)
-{
-        return __sched_vslice(cfs_rq->load.weight, cfs_rq->nr_running);
-}
-
 static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
         return __sched_vslice(cfs_rq->load.weight + se->load.weight,
@@ -504,15 +499,6 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
         } else
                 vruntime = cfs_rq->min_vruntime;
 
-        if (sched_feat(TREE_AVG)) {
-                struct sched_entity *last = __pick_last_entity(cfs_rq);
-                if (last) {
-                        vruntime += last->vruntime;
-                        vruntime >>= 1;
-                }
-        } else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
-                vruntime += sched_vslice(cfs_rq)/2;
-
         /*
          * The 'current' period is already promised to the current tasks,
          * however the extra weight of the new task will slow them down a
...
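For readers skimming the removal: the APPROX_AVG branch in place_entity() appears to be the last user of sched_vslice(), which is why that helper goes away together with the feature, while sched_vslice_add() stays. What the two removed policies did to a newly placed entity's vruntime is small enough to restate as a standalone sketch, in plain C with the CFS types reduced to bare u64 values; the function names below are illustrative, not kernel API:

    #include <stdint.h>

    typedef uint64_t u64;

    /*
     * TREE_AVG: average the base vruntime with the vruntime of the rightmost
     * ("last") entity in the rbtree, placing the new task midway into the
     * current spread (mirrors the removed if-branch above).
     */
    u64 place_tree_avg(u64 vruntime, const u64 *last_vruntime)
    {
            if (last_vruntime) {
                    vruntime += *last_vruntime;
                    vruntime >>= 1;
            }
            return vruntime;
    }

    /*
     * APPROX_AVG: ignore the tree and push the new task half a weighted
     * slice to the right of min_vruntime (mirrors the removed else-if above).
     */
    u64 place_approx_avg(u64 vruntime, u64 vslice, unsigned long nr_running)
    {
            if (nr_running)
                    vruntime += vslice / 2;
            return vruntime;
    }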