Commit 38022906 authored by Peter Zijlstra, committed by Ingo Molnar

sched: Fix sched_exec() balancing

Since we access ->cpus_allowed without holding rq->lock, we need a
retry loop to validate the result. This comes nearly for free when we
merge sched_migrate_task() into sched_exec(), since that already does
the needed check.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
LKML-Reference: <20091216170517.884743662@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent e2912009
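The new sched_exec() follows a simple pattern: choose a destination CPU without holding the runqueue lock, then revalidate that choice against ->cpus_allowed once the lock is held, and start over if the two raced. Below is a minimal user-space sketch of that pattern only, with invented names (allowed_mask, pick_target, migrate_to_allowed_cpu) and a pthread mutex standing in for rq->lock; it is an illustration under those assumptions, not the kernel code in the diff that follows.

/*
 * Hedged illustration only: the "pick without the lock, revalidate under
 * the lock, retry on a race" pattern.  All names here are invented.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_uint allowed_mask = 0xf;	/* CPUs the "task" may use */

/* Stand-in for select_task_rq(): pick the lowest allowed CPU. */
static int pick_target(unsigned int mask)
{
	for (int cpu = 0; cpu < 32; cpu++)
		if (mask & (1u << cpu))
			return cpu;
	return -1;
}

static int migrate_to_allowed_cpu(void)
{
	int dest;

again:
	/* Optimistic read: no lock held, so the mask may change under us. */
	dest = pick_target(atomic_load(&allowed_mask));
	if (dest < 0)
		return -1;

	pthread_mutex_lock(&lock);
	/* Revalidate under the lock; on a race, drop the lock and retry. */
	if (!(atomic_load(&allowed_mask) & (1u << dest))) {
		pthread_mutex_unlock(&lock);
		goto again;	/* analogous to the new "goto again" */
	}
	/* ... the real migration work would happen here, lock held ... */
	pthread_mutex_unlock(&lock);
	return dest;
}

int main(void)
{
	printf("migrated to CPU %d\n", migrate_to_allowed_cpu());
	return 0;
}

If the mask changes between the unlocked read and the locked check, the loop simply picks again; that is the same guarantee the "goto again" in the new sched_exec() provides.
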
@@ -2322,7 +2322,7 @@ void task_oncpu_function_call(struct task_struct *p,
  *
  *  - fork, @p is stable because it isn't on the tasklist yet
  *
- *  - exec, @p is unstable XXX
+ *  - exec, @p is unstable, retry loop
  *
  *  - wake-up, we serialize ->cpus_allowed against TASK_WAKING so
  *    we should be good.
@@ -3132,21 +3132,36 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 }
 
 /*
- * If dest_cpu is allowed for this process, migrate the task to it.
- * This is accomplished by forcing the cpu_allowed mask to only
- * allow dest_cpu, which will force the cpu onto dest_cpu. Then
- * the cpu_allowed mask is restored.
+ * sched_exec - execve() is a valuable balancing opportunity, because at
+ * this point the task has the smallest effective memory and cache footprint.
  */
-static void sched_migrate_task(struct task_struct *p, int dest_cpu)
+void sched_exec(void)
 {
+	struct task_struct *p = current;
 	struct migration_req req;
+	int dest_cpu, this_cpu;
 	unsigned long flags;
 	struct rq *rq;
 
+again:
+	this_cpu = get_cpu();
+	dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0);
+	if (dest_cpu == this_cpu) {
+		put_cpu();
+		return;
+	}
+
 	rq = task_rq_lock(p, &flags);
+	put_cpu();
+
+	/*
+	 * select_task_rq() can race against ->cpus_allowed
+	 */
 	if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
-	    || unlikely(!cpu_active(dest_cpu)))
-		goto out;
+	    || unlikely(!cpu_active(dest_cpu))) {
+		task_rq_unlock(rq, &flags);
+		goto again;
+	}
 
 	/* force the process onto the specified CPU */
 	if (migrate_task(p, dest_cpu, &req)) {
@@ -3161,23 +3176,9 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
 		return;
 	}
-out:
 	task_rq_unlock(rq, &flags);
 }
 
-/*
- * sched_exec - execve() is a valuable balancing opportunity, because at
- * this point the task has the smallest effective memory and cache footprint.
- */
-void sched_exec(void)
-{
-	int new_cpu, this_cpu = get_cpu();
-
-	new_cpu = select_task_rq(current, SD_BALANCE_EXEC, 0);
-	put_cpu();
-	if (new_cpu != this_cpu)
-		sched_migrate_task(current, new_cpu);
-}
 
 /*
  * pull_task - move a task from a remote runqueue to the local runqueue.
  * Both runqueues must be locked.