Commit 3a7e73a2 authored by Peter Zijlstra, committed by Ingo Molnar

sched: Clean up check_preempt_wakeup()

Streamline the wakeup preemption code a bit, unifying the preempt paths
so that they all do the same thing.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent a65ac745
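The cleanup below collapses several duplicated "resched_task(curr); return;" sequences into a single preempt: exit label, so the shared follow-up work runs in one place. A minimal standalone sketch of that pattern (hypothetical toy code, not the kernel sources):

/*
 * Toy illustration of the cleanup pattern (hypothetical example,
 * not kernel code): before, every preemption site duplicated the
 * "resched + return" action; after, they all jump to one label.
 */
#include <stdbool.h>
#include <stdio.h>

static void resched(void)
{
	printf("resched current task\n");
}

/* Before: the action is repeated at each early-exit site. */
static void check_preempt_old(bool rt, bool idle, bool sync)
{
	if (rt) {
		resched();
		return;
	}
	if (idle) {
		resched();
		return;
	}
	if (sync) {
		resched();
		return;
	}
}

/*
 * After: every condition funnels into a single preempt path, so any
 * bookkeeping shared by all preemption sites lives in one place.
 */
static void check_preempt_new(bool rt, bool idle, bool sync)
{
	if (rt)
		goto preempt;
	if (idle)
		goto preempt;
	if (sync)
		goto preempt;
	return;

preempt:
	resched();
	/* shared post-preempt bookkeeping would go here */
}

int main(void)
{
	check_preempt_old(true, false, false);
	check_preempt_new(false, false, true);
	return 0;
}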
@@ -1651,10 +1651,8 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 	int sync = wake_flags & WF_SYNC;
 	int scale = cfs_rq->nr_running >= sched_nr_latency;
 
-	if (unlikely(rt_prio(p->prio))) {
-		resched_task(curr);
-		return;
-	}
+	if (unlikely(rt_prio(p->prio)))
+		goto preempt;
 
 	if (unlikely(p->sched_class != &fair_sched_class))
 		return;
@@ -1680,52 +1678,47 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 		return;
 
 	/* Idle tasks are by definition preempted by everybody. */
-	if (unlikely(curr->policy == SCHED_IDLE)) {
-		resched_task(curr);
-		return;
-	}
+	if (unlikely(curr->policy == SCHED_IDLE))
+		goto preempt;
 
-	if ((sched_feat(WAKEUP_SYNC) && sync) ||
-	    (sched_feat(WAKEUP_OVERLAP) &&
-	     (se->avg_overlap < sysctl_sched_migration_cost &&
-	      pse->avg_overlap < sysctl_sched_migration_cost))) {
-		resched_task(curr);
-		return;
-	}
+	if (sched_feat(WAKEUP_SYNC) && sync)
+		goto preempt;
 
-	if (sched_feat(WAKEUP_RUNNING)) {
-		if (pse->avg_running < se->avg_running) {
-			set_next_buddy(pse);
-			resched_task(curr);
-			return;
-		}
-	}
+	if (sched_feat(WAKEUP_OVERLAP) &&
+	    se->avg_overlap < sysctl_sched_migration_cost &&
+	    pse->avg_overlap < sysctl_sched_migration_cost)
+		goto preempt;
+
+	if (sched_feat(WAKEUP_RUNNING) && pse->avg_running < se->avg_running)
+		goto preempt;
 
 	if (!sched_feat(WAKEUP_PREEMPT))
 		return;
 
+	update_curr(cfs_rq);
 	find_matching_se(&se, &pse);
-
 	BUG_ON(!pse);
+	if (wakeup_preempt_entity(se, pse) == 1)
+		goto preempt;
 
-	update_curr(cfs_rq);
+	return;
 
-	if (wakeup_preempt_entity(se, pse) == 1) {
-		resched_task(curr);
-		/*
-		 * Only set the backward buddy when the current task is still
-		 * on the rq. This can happen when a wakeup gets interleaved
-		 * with schedule on the ->pre_schedule() or idle_balance()
-		 * point, either of which can * drop the rq lock.
-		 *
-		 * Also, during early boot the idle thread is in the fair class,
-		 * for obvious reasons its a bad idea to schedule back to it.
-		 */
-		if (unlikely(!se->on_rq || curr == rq->idle))
-			return;
-		if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
-			set_last_buddy(se);
-	}
+preempt:
+	resched_task(curr);
+	/*
+	 * Only set the backward buddy when the current task is still
+	 * on the rq. This can happen when a wakeup gets interleaved
+	 * with schedule on the ->pre_schedule() or idle_balance()
+	 * point, either of which can * drop the rq lock.
+	 *
+	 * Also, during early boot the idle thread is in the fair class,
+	 * for obvious reasons its a bad idea to schedule back to it.
+	 */
+	if (unlikely(!se->on_rq || curr == rq->idle))
+		return;
+
+	if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
+		set_last_buddy(se);
 }
 
 static struct task_struct *pick_next_task_fair(struct rq *rq)
...