Commit 07b4032c authored by Gregory Haskins, committed by Ingo Molnar

sched: break out search for RT tasks

Isolate the search logic into a function so that it can be used later
in places other than find_lock_lowest_rq().
Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent e7693a36
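The shape of the change, as a minimal self-contained C sketch. The types and names here (runqueues[], highest_prio, find_lowest_cpu(), find_lock_lowest()) are simplified stand-ins, not the kernel's actual code: the point is that the pure, lock-free search is split out from the wrapper that owns the bounded lock-and-retry loop.

#include <stdio.h>

#define NR_CPUS      4
#define MAX_RT_PRIO  100	/* prio values >= this mean "no RT task queued" */
#define RT_MAX_TRIES 3

struct rq {
	int cpu;
	int highest_prio;	/* prio of best queued RT task; lower value = higher prio */
};

static struct rq runqueues[NR_CPUS];

/*
 * Pure, lock-free search: return the CPU whose runqueue is the best
 * push target for a task of priority task_prio, or -1 if none
 * qualifies. Mirrors the control flow of the new find_lowest_rq().
 */
static int find_lowest_cpu(int this_cpu, int task_prio)
{
	struct rq *lowest = NULL;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		struct rq *rq = &runqueues[cpu];

		if (cpu == this_cpu)
			continue;

		/* A CPU with no RT task queued wins immediately. */
		if (rq->highest_prio >= MAX_RT_PRIO)
			return cpu;

		/* Otherwise prefer the rq whose best task is weakest. */
		if (rq->highest_prio > task_prio &&
		    (!lowest || rq->highest_prio > lowest->highest_prio))
			lowest = rq;
	}
	return lowest ? lowest->cpu : -1;
}

/*
 * Wrapper in the shape of find_lock_lowest_rq(): redo the unlocked
 * search a bounded number of times. The real locking and revalidation
 * (double_lock_balance() etc.) are elided in this sketch.
 */
static struct rq *find_lock_lowest(int this_cpu, int task_prio)
{
	int tries;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		int cpu = find_lowest_cpu(this_cpu, task_prio);

		if (cpu == -1)
			break;
		/* ...lock runqueues[cpu], revalidate, loop again on a lost race... */
		return &runqueues[cpu];
	}
	return NULL;
}

int main(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		runqueues[i].cpu = i;
		runqueues[i].highest_prio = MAX_RT_PRIO;	/* RT-idle */
	}
	runqueues[1].highest_prio = 10;	/* a strong RT task runs on cpu 1 */

	/* Push a prio-50 task away from cpu 0: cpu 2 (RT-idle) is chosen. */
	struct rq *dst = find_lock_lowest(0, 50);
	printf("push target: cpu %d\n", dst ? dst->cpu : -1);
	return 0;
}

Compiled standalone, this prints "push target: cpu 2": an RT-idle CPU is preferred over cpu 1, which is running a stronger (prio-10) task.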
@@ -263,54 +263,66 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq,
 
 static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
 
-/* Will lock the rq it finds */
-static struct rq *find_lock_lowest_rq(struct task_struct *task,
-				      struct rq *this_rq)
+static int find_lowest_rq(struct task_struct *task)
 {
-	struct rq *lowest_rq = NULL;
 	int cpu;
-	int tries;
 	cpumask_t *cpu_mask = &__get_cpu_var(local_cpu_mask);
+	struct rq *lowest_rq = NULL;
 
 	cpus_and(*cpu_mask, cpu_online_map, task->cpus_allowed);
 
-	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
-		/*
-		 * Scan each rq for the lowest prio.
-		 */
-		for_each_cpu_mask(cpu, *cpu_mask) {
-			struct rq *rq = &per_cpu(runqueues, cpu);
+	/*
+	 * Scan each rq for the lowest prio.
+	 */
+	for_each_cpu_mask(cpu, *cpu_mask) {
+		struct rq *rq = cpu_rq(cpu);
 
-			if (cpu == this_rq->cpu)
-				continue;
+		if (cpu == rq->cpu)
+			continue;
 
-			/* We look for lowest RT prio or non-rt CPU */
-			if (rq->rt.highest_prio >= MAX_RT_PRIO) {
-				lowest_rq = rq;
-				break;
-			}
+		/* We look for lowest RT prio or non-rt CPU */
+		if (rq->rt.highest_prio >= MAX_RT_PRIO) {
+			lowest_rq = rq;
+			break;
+		}
 
-			/* no locking for now */
-			if (rq->rt.highest_prio > task->prio &&
-			    (!lowest_rq || rq->rt.highest_prio > lowest_rq->rt.highest_prio)) {
-				lowest_rq = rq;
-			}
+		/* no locking for now */
+		if (rq->rt.highest_prio > task->prio &&
+		    (!lowest_rq || rq->rt.highest_prio > lowest_rq->rt.highest_prio)) {
+			lowest_rq = rq;
 		}
+	}
 
-		if (!lowest_rq)
+	return lowest_rq ? lowest_rq->cpu : -1;
+}
+
+/* Will lock the rq it finds */
+static struct rq *find_lock_lowest_rq(struct task_struct *task,
+				      struct rq *rq)
+{
+	struct rq *lowest_rq = NULL;
+	int cpu;
+	int tries;
+
+	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
+		cpu = find_lowest_rq(task);
+
+		if (cpu == -1)
 			break;
 
+		lowest_rq = cpu_rq(cpu);
+
 		/* if the prio of this runqueue changed, try again */
-		if (double_lock_balance(this_rq, lowest_rq)) {
+		if (double_lock_balance(rq, lowest_rq)) {
 			/*
 			 * We had to unlock the run queue. In
 			 * the mean time, task could have
 			 * migrated already or had its affinity changed.
 			 * Also make sure that it wasn't scheduled on its rq.
 			 */
-			if (unlikely(task_rq(task) != this_rq ||
+			if (unlikely(task_rq(task) != rq ||
 				     !cpu_isset(lowest_rq->cpu, task->cpus_allowed) ||
-				     task_running(this_rq, task) ||
+				     task_running(rq, task) ||
 				     !task->se.on_rq)) {
 				spin_unlock(&lowest_rq->lock);
 				lowest_rq = NULL;
...
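A note on the design choice visible in the second hunk: the scan in find_lowest_rq() is deliberately lockless ("no locking for now"), so its answer can be stale by the time find_lock_lowest_rq() takes locks. double_lock_balance() may drop and retake the source rq lock, so afterwards the code revalidates that the task is still on this rq (task_rq(task) == rq), is still allowed on lowest_rq->cpu, is not currently running, and is still queued (task->se.on_rq); if any check fails, the candidate rq is unlocked and the search is retried, bounded by RT_MAX_TRIES.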