Commit 65cc8e48 authored by Peter Zijlstra, committed by Ingo Molnar

sched: Optimize task_rq_lock()

Now that we hold the rq->lock over set_task_cpu() again, we can do
away with most of the TASK_WAKING checks and reduce them again to
set_cpus_allowed_ptr().

Removes some conditionals from scheduling hot-paths.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Oleg Nesterov <oleg@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent 0017d735
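
For context, the change leaves __task_rq_lock()/task_rq_lock() with a plain double-check retry loop. Below is a minimal userspace sketch of that pattern; it is illustrative only, with simplified stand-in types (a pthread mutex for rq->lock, a bare struct for the task), not the kernel implementation:

/* Userspace model of the lock-retry pattern kept by task_rq_lock(). */
#include <pthread.h>
#include <stdio.h>

struct rq {
        pthread_mutex_t lock;   /* stand-in for rq->lock */
        int cpu;
};

struct task_struct {
        struct rq *rq;          /* runqueue the task currently sits on */
};

/*
 * Lock the runqueue the task is on: read the rq pointer, lock it, then
 * re-check the pointer; if the task migrated in between, unlock and retry.
 */
static struct rq *task_rq_lock_model(struct task_struct *p)
{
        struct rq *rq;

        for (;;) {
                rq = p->rq;                      /* racy read */
                pthread_mutex_lock(&rq->lock);
                if (rq == p->rq)                 /* still on that rq? */
                        return rq;               /* yes: lock is held */
                pthread_mutex_unlock(&rq->lock); /* no: migrated, retry */
        }
}

int main(void)
{
        struct rq rq0 = { .lock = PTHREAD_MUTEX_INITIALIZER, .cpu = 0 };
        struct task_struct p = { .rq = &rq0 };

        struct rq *rq = task_rq_lock_model(&p);
        printf("locked rq of cpu %d\n", rq->cpu);
        pthread_mutex_unlock(&rq->lock);
        return 0;
}
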
@@ -914,8 +914,8 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 #endif /* __ARCH_WANT_UNLOCKED_CTXSW */
 
 /*
- * Check whether the task is waking, we use this to synchronize against
- * ttwu() so that task_cpu() reports a stable number.
+ * Check whether the task is waking, we use this to synchronize ->cpus_allowed
+ * against ttwu().
  */
 static inline int task_is_waking(struct task_struct *p)
 {
@@ -932,11 +932,9 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
        struct rq *rq;
 
        for (;;) {
-               while (task_is_waking(p))
-                       cpu_relax();
                rq = task_rq(p);
                raw_spin_lock(&rq->lock);
-               if (likely(rq == task_rq(p) && !task_is_waking(p)))
+               if (likely(rq == task_rq(p)))
                        return rq;
                raw_spin_unlock(&rq->lock);
        }
@@ -953,12 +951,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
        struct rq *rq;
 
        for (;;) {
-               while (task_is_waking(p))
-                       cpu_relax();
                local_irq_save(*flags);
                rq = task_rq(p);
                raw_spin_lock(&rq->lock);
-               if (likely(rq == task_rq(p) && !task_is_waking(p)))
+               if (likely(rq == task_rq(p)))
                        return rq;
                raw_spin_unlock_irqrestore(&rq->lock, *flags);
        }
@@ -5262,7 +5258,18 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
        struct rq *rq;
        int ret = 0;
 
+       /*
+        * Serialize against TASK_WAKING so that ttwu() and wunt() can
+        * drop the rq->lock and still rely on ->cpus_allowed.
+        */
+again:
+       while (task_is_waking(p))
+               cpu_relax();
        rq = task_rq_lock(p, &flags);
+       if (task_is_waking(p)) {
+               task_rq_unlock(rq, &flags);
+               goto again;
+       }
 
        if (!cpumask_intersects(new_mask, cpu_active_mask)) {
                ret = -EINVAL;
...
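
To make the last hunk concrete, here is a similarly hedged userspace model of the serialization that set_cpus_allowed_ptr() now performs on its own: spin while the task is marked waking, take the lock, then re-check; a wakeup that raced in forces a retry. An atomic flag stands in for the TASK_WAKING state and a pthread mutex for the rq lock; the names below are illustrative, not the kernel API:

/* Userspace model of the TASK_WAKING serialization in set_cpus_allowed_ptr(). */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct task_model {
        atomic_bool waking;             /* stand-in for TASK_WAKING */
        pthread_mutex_t rq_lock;        /* stand-in for task_rq(p)->lock */
        unsigned long cpus_allowed;
};

static void set_cpus_allowed_model(struct task_model *p, unsigned long new_mask)
{
again:
        /* Wait until the task is no longer in the middle of a wakeup. */
        while (atomic_load(&p->waking))
                ;                       /* cpu_relax() in the kernel */

        pthread_mutex_lock(&p->rq_lock);
        if (atomic_load(&p->waking)) {
                /* A wakeup slipped in before we got the lock: start over. */
                pthread_mutex_unlock(&p->rq_lock);
                goto again;
        }

        /* No concurrent wakeup can observe a half-updated mask now. */
        p->cpus_allowed = new_mask;
        pthread_mutex_unlock(&p->rq_lock);
}

int main(void)
{
        struct task_model p = {
                .waking = false,
                .rq_lock = PTHREAD_MUTEX_INITIALIZER,
                .cpus_allowed = 0x1,
        };

        set_cpus_allowed_model(&p, 0x3);
        printf("cpus_allowed = %#lx\n", p.cpus_allowed);
        return 0;
}
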