Commit 13b8bd0a authored by Rusty Russell, committed by Ingo Molnar

sched_rt: don't allocate cpumask in fastpath

Impact: cleanup

As pointed out by Steven Rostedt.  Since the arg in question is
unused, we simply change cpupri_find() to accept NULL.
Reported-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
LKML-Reference: <200903251501.22664.rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent a18b83b7
kernel/sched_cpupri.c
@@ -55,7 +55,7 @@ static int convert_prio(int prio)
  * cpupri_find - find the best (lowest-pri) CPU in the system
  * @cp: The cpupri context
  * @p: The task
- * @lowest_mask: A mask to fill in with selected CPUs
+ * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
  *
  * Note: This function returns the recommended CPUs as calculated during the
  * current invokation. By the time the call returns, the CPUs may have in
@@ -81,7 +81,8 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
 			continue;
 
-		cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
+		if (lowest_mask)
+			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
 		return 1;
 	}
 
kernel/sched_rt.c
@@ -805,20 +805,15 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
 
 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 {
-	cpumask_var_t mask;
-
 	if (rq->curr->rt.nr_cpus_allowed == 1)
 		return;
 
-	if (!alloc_cpumask_var(&mask, GFP_ATOMIC))
-		return;
-
-	if (p->rt.nr_cpus_allowed != 1
-	    && cpupri_find(&rq->rd->cpupri, p, mask))
-		goto free;
+	if (p->rt.nr_cpus_allowed != 1
+	    && cpupri_find(&rq->rd->cpupri, p, NULL))
+		return;
 
-	if (!cpupri_find(&rq->rd->cpupri, rq->curr, mask))
-		goto free;
+	if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
+		return;
 
 	/*
 	 * There appears to be other cpus that can accept
@@ -827,8 +822,6 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 	 */
 	requeue_task_rt(rq, p, 1);
 	resched_task(rq->curr);
-free:
-	free_cpumask_var(mask);
 }
 
 #endif /* CONFIG_SMP */
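The idiom here is an optional output parameter: cpupri_find() computes its yes/no answer either way, but writes the result mask only when the caller actually supplied one, so existence-only callers such as check_preempt_equal_prio() no longer need a scratch cpumask allocated with GFP_ATOMIC on the wakeup path. A minimal standalone sketch of the pattern, assuming a hypothetical find_match() with plain unsigned long bitmasks standing in for struct cpumask (this is not the kernel API):

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for cpupri_find(): returns whether "allowed"
 * and "candidates" intersect, and fills *out_mask only when the caller
 * supplied storage -- the same NULL guard the patch adds around
 * cpumask_and().
 */
static bool find_match(unsigned long allowed, unsigned long candidates,
		       unsigned long *out_mask)
{
	unsigned long match = allowed & candidates;

	if (!match)
		return false;
	if (out_mask)		/* NULL means "existence check only" */
		*out_mask = match;
	return true;
}

int main(void)
{
	unsigned long mask;

	/*
	 * Fastpath style (check_preempt_equal_prio() after the patch):
	 * only the yes/no answer matters, so no temporary mask is
	 * allocated and no free/goto cleanup path is needed.
	 */
	if (find_match(0x0f, 0x0c, NULL))
		printf("a candidate exists\n");

	/* Slowpath style: the caller really wants the resulting mask. */
	if (find_match(0x0f, 0x0c, &mask))
		printf("candidates: 0x%lx\n", mask);

	return 0;
}

The trade-off is one extra branch in the callee in exchange for letting every existence-only caller skip the allocation, its failure handling, and the cleanup label entirely.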