提交 1e5a7405 编写于 作者: Peter Zijlstra 提交者: Ingo Molnar

sched: Fix cross-sched-class wakeup preemption

Instead of dealing with sched classes inside each check_preempt_curr()
implementation, pull out this logic into the generic wakeup preemption
path.

This fixes a hang in KVM (and others) where we are waiting for the
stop machine thread to run ...
Reported-by: Markus Trippelsdorf <markus@trippelsdorf.de>
Tested-by: Marcelo Tosatti <mtosatti@redhat.com>
Tested-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1288891946.2039.31.camel@laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
上级 2d467090
...@@ -560,18 +560,8 @@ struct rq { ...@@ -560,18 +560,8 @@ struct rq {
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
static inline
void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
rq->curr->sched_class->check_preempt_curr(rq, p, flags);
/* static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
* A queue event has occurred, and we're going to schedule. In
* this case, we can save a useless back to back clock update.
*/
if (test_tsk_need_resched(p))
rq->skip_clock_update = 1;
}
static inline int cpu_of(struct rq *rq) static inline int cpu_of(struct rq *rq)
{ {
...@@ -2118,6 +2108,31 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p, ...@@ -2118,6 +2108,31 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
p->sched_class->prio_changed(rq, p, oldprio, running); p->sched_class->prio_changed(rq, p, oldprio, running);
} }
/*
 * Decide whether waking task @p should preempt the task currently
 * running on @rq, handling cross-sched-class wakeups generically so
 * the individual class implementations don't have to.
 */
static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
	const struct sched_class *sc;

	if (p->sched_class == rq->curr->sched_class) {
		/* Same class: defer the preemption decision to the class. */
		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
	} else {
		/*
		 * Different classes: walk the class list from highest to
		 * lowest priority.  Whichever of the two classes is met
		 * first outranks the other; if that is p's class, p beats
		 * the running task and we force a reschedule.
		 */
		for_each_class(sc) {
			if (sc == rq->curr->sched_class)
				break;
			if (sc == p->sched_class) {
				resched_task(rq->curr);
				break;
			}
		}
	}

	/*
	 * A queue event has occurred, and we're going to schedule.  In
	 * this case, we can save a useless back to back clock update.
	 */
	if (test_tsk_need_resched(rq->curr))
		rq->skip_clock_update = 1;
}
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* /*
* Is this task likely cache-hot: * Is this task likely cache-hot:
......
...@@ -1654,12 +1654,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ ...@@ -1654,12 +1654,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
struct cfs_rq *cfs_rq = task_cfs_rq(curr); struct cfs_rq *cfs_rq = task_cfs_rq(curr);
int scale = cfs_rq->nr_running >= sched_nr_latency; int scale = cfs_rq->nr_running >= sched_nr_latency;
if (unlikely(rt_prio(p->prio)))
goto preempt;
if (unlikely(p->sched_class != &fair_sched_class))
return;
if (unlikely(se == pse)) if (unlikely(se == pse))
return; return;
......
...@@ -19,7 +19,7 @@ select_task_rq_stop(struct rq *rq, struct task_struct *p, ...@@ -19,7 +19,7 @@ select_task_rq_stop(struct rq *rq, struct task_struct *p,
static void static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags) check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
{ {
resched_task(rq->curr); /* we preempt everything */ /* we're never preempted */
} }
static struct task_struct *pick_next_task_stop(struct rq *rq) static struct task_struct *pick_next_task_stop(struct rq *rq)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册