Commit 013fdb80, authored by Peter Zijlstra, committed by Ingo Molnar

sched: Serialize p->cpus_allowed and ttwu() using p->pi_lock

Currently p->pi_lock already serializes p->sched_class; also put
p->cpus_allowed and try_to_wake_up() under it. This prepares the way
to do the first part of ttwu() without holding rq->lock.

By having p->sched_class and p->cpus_allowed serialized by p->pi_lock,
we prepare the way to call select_task_rq() without holding rq->lock.

Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110405152728.990364093@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent fd2f4419
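The locking rule this patch introduces is that ->cpus_allowed (like ->sched_class) is only modified while holding both p->pi_lock and the task's rq->lock, so a reader holding either lock sees a stable value. Below is a minimal user-space sketch of that "write under both locks, read under either" discipline using pthread mutexes; the names here (struct task, pi_lock, rq_lock, set_affinity(), read_affinity()) are illustrative stand-ins for this sketch only, not the kernel API.

/* Sketch only: models the locking rule, not the scheduler itself. */
#include <pthread.h>
#include <stdio.h>

struct task {
        pthread_mutex_t pi_lock;        /* stands in for p->pi_lock  */
        pthread_mutex_t rq_lock;        /* stands in for rq->lock    */
        unsigned long cpus_allowed;     /* stands in for the cpumask */
};

static void set_affinity(struct task *p, unsigned long mask)
{
        /* Writer: pi_lock first, then rq_lock, mirroring the
         * raw_spin_lock_irqsave(&p->pi_lock, flags) + __task_rq_lock(p)
         * pairing the patch adds below. */
        pthread_mutex_lock(&p->pi_lock);
        pthread_mutex_lock(&p->rq_lock);
        p->cpus_allowed = mask;
        pthread_mutex_unlock(&p->rq_lock);
        pthread_mutex_unlock(&p->pi_lock);
}

static unsigned long read_affinity(struct task *p)
{
        /* Reader: either lock alone is enough, because every writer
         * holds both; this is what lets sched_getaffinity() below drop
         * the rq lock entirely. */
        unsigned long mask;

        pthread_mutex_lock(&p->pi_lock);
        mask = p->cpus_allowed;
        pthread_mutex_unlock(&p->pi_lock);
        return mask;
}

int main(void)
{
        static struct task p = {
                PTHREAD_MUTEX_INITIALIZER,
                PTHREAD_MUTEX_INITIALIZER,
                0x1
        };

        set_affinity(&p, 0xf);
        printf("cpus_allowed = %#lx\n", read_affinity(&p));
        return 0;
}

Build with something like gcc -pthread; the point is only the lock ordering, not the data structures.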
@@ -2340,7 +2340,7 @@ EXPORT_SYMBOL_GPL(kick_process);
 #ifdef CONFIG_SMP
 /*
- * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
+ * ->cpus_allowed is protected by both rq->lock and p->pi_lock
  */
 static int select_fallback_rq(int cpu, struct task_struct *p)
 {
@@ -2373,7 +2373,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 }
 /*
- * The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable.
+ * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
  */
 static inline
 int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags)
@@ -2499,7 +2499,8 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
         this_cpu = get_cpu();
         smp_wmb();
-        rq = task_rq_lock(p, &flags);
+        raw_spin_lock_irqsave(&p->pi_lock, flags);
+        rq = __task_rq_lock(p);
         if (!(p->state & state))
                 goto out;
@@ -2557,7 +2558,8 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
         ttwu_stat(rq, p, cpu, wake_flags);
         success = 1;
 out:
-        task_rq_unlock(rq, &flags);
+        __task_rq_unlock(rq);
+        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
         put_cpu();
         return success;
@@ -4694,6 +4696,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
         BUG_ON(prio < 0 || prio > MAX_PRIO);
+        lockdep_assert_held(&p->pi_lock);
         rq = task_rq_lock(p, &flags);
         trace_sched_pi_setprio(p, prio);
@@ -5317,7 +5321,6 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
 {
         struct task_struct *p;
         unsigned long flags;
-        struct rq *rq;
         int retval;
         get_online_cpus();
@@ -5332,9 +5335,9 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
         if (retval)
                 goto out_unlock;
-        rq = task_rq_lock(p, &flags);
+        raw_spin_lock_irqsave(&p->pi_lock, flags);
         cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
-        task_rq_unlock(rq, &flags);
+        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 out_unlock:
         rcu_read_unlock();
@@ -5882,18 +5885,8 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
         unsigned int dest_cpu;
         int ret = 0;
-        /*
-         * Serialize against TASK_WAKING so that ttwu() and wunt() can
-         * drop the rq->lock and still rely on ->cpus_allowed.
-         */
-again:
-        while (task_is_waking(p))
-                cpu_relax();
-        rq = task_rq_lock(p, &flags);
-        if (task_is_waking(p)) {
-                task_rq_unlock(rq, &flags);
-                goto again;
-        }
+        raw_spin_lock_irqsave(&p->pi_lock, flags);
+        rq = __task_rq_lock(p);
         if (!cpumask_intersects(new_mask, cpu_active_mask)) {
                 ret = -EINVAL;
@@ -5921,13 +5914,15 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
         if (migrate_task(p, rq)) {
                 struct migration_arg arg = { p, dest_cpu };
                 /* Need help from migration thread: drop lock and wait. */
-                task_rq_unlock(rq, &flags);
+                __task_rq_unlock(rq);
+                raw_spin_unlock_irqrestore(&p->pi_lock, flags);
                 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
                 tlb_migrate_finish(p->mm);
                 return 0;
         }
 out:
-        task_rq_unlock(rq, &flags);
+        __task_rq_unlock(rq);
+        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
         return ret;
 }
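
The set_cpus_allowed_ptr() hunks above also show the second half of the pattern: when the task has to be migrated, both locks are released in reverse order (rq->lock, then p->pi_lock) before blocking on the migration thread, which replaces the old busy-wait loop on TASK_WAKING. A hedged user-space sketch of that unlock-before-blocking shape, in the same illustrative terms as the earlier sketch; wait_for_migration() is a hypothetical stand-in for stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg):

/* Sketch only: shows the lock/unlock ordering, not real migration. */
#include <pthread.h>

struct task {
        pthread_mutex_t pi_lock;
        pthread_mutex_t rq_lock;
        unsigned long cpus_allowed;
};

/* Hypothetical stand-in for the migration helper: it may sleep, so it
 * must be called with no locks held. */
static void wait_for_migration(struct task *p)
{
        (void)p;
}

static int set_cpus_allowed_sketch(struct task *p, unsigned long new_mask,
                                   int needs_migration)
{
        pthread_mutex_lock(&p->pi_lock);   /* outer lock, taken first  */
        pthread_mutex_lock(&p->rq_lock);   /* inner lock, taken second */

        p->cpus_allowed = new_mask;

        if (needs_migration) {
                /* Drop the locks in reverse order, then block. */
                pthread_mutex_unlock(&p->rq_lock);
                pthread_mutex_unlock(&p->pi_lock);
                wait_for_migration(p);
                return 0;
        }

        pthread_mutex_unlock(&p->rq_lock);
        pthread_mutex_unlock(&p->pi_lock);
        return 0;
}

int main(void)
{
        static struct task p = {
                PTHREAD_MUTEX_INITIALIZER,
                PTHREAD_MUTEX_INITIALIZER,
                0x1
        };

        return set_cpus_allowed_sketch(&p, 0x3, 1);
}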