Commit 80bf3171, authored by Ingo Molnar

sched: clean up pull_rt_task()

clean up pull_rt_task().
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent: 00597c3e
@@ -576,12 +576,9 @@ static void push_rt_tasks(struct rq *rq)
 
 static int pull_rt_task(struct rq *this_rq)
 {
-	struct task_struct *next;
-	struct task_struct *p;
+	int this_cpu = this_rq->cpu, ret = 0, cpu;
+	struct task_struct *p, *next;
 	struct rq *src_rq;
-	int this_cpu = this_rq->cpu;
-	int cpu;
-	int ret = 0;
 
 	/*
 	 * If cpusets are used, and we have overlapping
@@ -608,23 +605,25 @@ static int pull_rt_task(struct rq *this_rq)
 			if (double_lock_balance(this_rq, src_rq)) {
 				/* unlocked our runqueue lock */
 				struct task_struct *old_next = next;
+
 				next = pick_next_task_rt(this_rq);
 				if (next != old_next)
 					ret = 1;
 			}
-			if (likely(src_rq->rt.rt_nr_running <= 1))
+			if (likely(src_rq->rt.rt_nr_running <= 1)) {
 				/*
 				 * Small chance that this_rq->curr changed
 				 * but it's really harmless here.
 				 */
 				rt_clear_overload(this_rq);
-			else
+			} else {
 				/*
 				 * Heh, the src_rq is now overloaded, since
 				 * we already have the src_rq lock, go straight
 				 * to pulling tasks from it.
 				 */
 				goto try_pulling;
+			}
 			spin_unlock(&src_rq->lock);
 			continue;
 		}
@@ -638,6 +637,7 @@ static int pull_rt_task(struct rq *this_rq)
 		 */
 		if (double_lock_balance(this_rq, src_rq)) {
 			struct task_struct *old_next = next;
+
 			next = pick_next_task_rt(this_rq);
 			if (next != old_next)
 				ret = 1;
@@ -674,7 +674,7 @@ static int pull_rt_task(struct rq *this_rq)
 			 */
 			if (p->prio < src_rq->curr->prio ||
 			    (next && next->prio < src_rq->curr->prio))
-				goto bail;
+				goto out;
 
 			ret = 1;
 
@@ -686,9 +686,7 @@ static int pull_rt_task(struct rq *this_rq)
 			 * case there's an even higher prio task
 			 * in another runqueue. (low likelyhood
 			 * but possible)
-			 */
-
-			/*
+			 *
 			 * Update next so that we won't pick a task
 			 * on another cpu with a priority lower (or equal)
 			 * than the one we just picked.
@@ -696,7 +694,7 @@ static int pull_rt_task(struct rq *this_rq)
 			next = p;
 		}
 
- bail:
+ out:
 		spin_unlock(&src_rq->lock);
 	}
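The patch is purely stylistic: the local variables are consolidated into two declaration lines, the two adjacent comment blocks in the search path are merged into one, the "bail" label is renamed to the more conventional "out", and the if/else arms that consist of a block comment plus a single statement gain braces. That last change is the one worth spelling out: without braces, the multi-line comment visually detaches the statement from its condition. Below is a minimal, standalone sketch of the same bracing pattern; it is ordinary user-space C, not kernel code, and clear_overload_flag() and pull_one_task() are made-up stand-ins for rt_clear_overload() and the try_pulling path.

#include <stdio.h>
#include <stdbool.h>

/* Made-up stand-ins for rt_clear_overload() and the try_pulling path. */
static bool overloaded = true;

static void clear_overload_flag(void)
{
	overloaded = false;
}

static void pull_one_task(void)
{
	printf("pulling a task\n");
}

static void balance(int nr_running)
{
	/*
	 * Same shape as the patched branch in pull_rt_task(): each arm is
	 * a block comment followed by one statement, and the braces keep
	 * that statement visually attached to its condition.
	 */
	if (nr_running <= 1) {
		/*
		 * Nothing worth pulling here; just drop the overload flag.
		 */
		clear_overload_flag();
	} else {
		/*
		 * More than one runnable task: go pull one.
		 */
		pull_one_task();
	}
}

int main(void)
{
	balance(1);	/* clears the flag */
	balance(3);	/* prints "pulling a task" */
	return overloaded ? 1 : 0;
}

The braced form stays unambiguous even as the comments grow, which is exactly the situation in pull_rt_task().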