Commit 7608dec2 authored by Peter Zijlstra, committed by Ingo Molnar

sched: Drop the rq argument to sched_class::select_task_rq()

In preparation of calling select_task_rq() without rq->lock held, drop
the dependency on the rq argument.
Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110405152729.031077745@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent 013fdb80
@@ -1067,8 +1067,7 @@ struct sched_class {
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
-	int (*select_task_rq)(struct rq *rq, struct task_struct *p,
-			      int sd_flag, int flags);
+	int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
 
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);

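For orientation, each scheduling class implements this hook and registers it in its struct sched_class instance. A minimal sketch of the post-patch wiring, using the fair-class names with the initializer abbreviated (not the full definition from the tree):

/* Sketch only: how a class plugs into the new hook prototype. */
static int
select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags);

static const struct sched_class fair_sched_class = {
	/* ...enqueue/dequeue/pick_next_task and friends elided... */
#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_fair,
#endif
	/* ... */
};
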
@@ -2195,13 +2195,15 @@ static int migration_cpu_stop(void *data);
  * The task's runqueue lock must be held.
  * Returns true if you have to wait for migration thread.
  */
-static bool migrate_task(struct task_struct *p, struct rq *rq)
+static bool need_migrate_task(struct task_struct *p)
 {
 	/*
 	 * If the task is not on a runqueue (and not running), then
 	 * the next wake-up will properly place the task.
 	 */
-	return p->on_rq || task_running(rq, p);
+	bool running = p->on_rq || p->on_cpu;
+	smp_rmb(); /* finish_lock_switch() */
+	return running;
 }
 
 /*

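The renamed helper no longer dereferences a runqueue: it reads p->on_rq and p->on_cpu directly, and the smp_rmb() documents its pairing with the context-switch path, which clears ->on_cpu only once the switch has fully completed. For context only, that paired write side in finish_lock_switch() looks roughly like the sketch below (simplified from the kernel of this era, not part of this patch):

/* Simplified sketch of the paired write side; debug details elided. */
static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared the task may be moved to another CPU,
	 * so make every write of the completed switch visible first.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
	raw_spin_unlock_irq(&rq->lock);
}
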
@@ -2376,9 +2378,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
  * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
  */
 static inline
-int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags)
+int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
 {
-	int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags);
+	int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
 
 	/*
 	 * In order not to call set_task_cpu() on a blocking task we need

@@ -2533,7 +2535,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 		en_flags |= ENQUEUE_WAKING;
 	}
 
-	cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
+	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
 	if (cpu != orig_cpu)
 		set_task_cpu(p, cpu);
 	__task_rq_unlock(rq);

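Here try_to_wake_up() still holds rq->lock around the call; the point of the new prototype is that follow-up patches in this series can select the destination CPU before taking any runqueue lock, under p->pi_lock alone. A rough, purely illustrative sketch of that shape (wake_up_sketch() is a made-up name, not code from this patch or from the eventual rework):

/* Illustrative only: pick the CPU first, lock only the chosen runqueue. */
static int wake_up_sketch(struct task_struct *p, int wake_flags)
{
	unsigned long flags;
	struct rq *rq;
	int cpu;

	raw_spin_lock_irqsave(&p->pi_lock, flags);	/* serializes wakeups */
	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
	set_task_cpu(p, cpu);

	rq = cpu_rq(cpu);
	raw_spin_lock(&rq->lock);			/* only now take a rq->lock */
	activate_task(rq, p, ENQUEUE_WAKEUP);
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

	return cpu;
}
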
@@ -2744,7 +2746,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	 * We set TASK_WAKING so that select_task_rq() can drop rq->lock
	 * without people poking at ->cpus_allowed.
 	 */
-	cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
+	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
 	set_task_cpu(p, cpu);
 
 	p->state = TASK_RUNNING;

@@ -3474,7 +3476,7 @@ void sched_exec(void)
 	int dest_cpu;
 
 	rq = task_rq_lock(p, &flags);
-	dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0);
+	dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
 	if (dest_cpu == smp_processor_id())
 		goto unlock;
 
@@ -3482,7 +3484,7 @@ void sched_exec(void)
 	 * select_task_rq() can race against ->cpus_allowed
 	 */
 	if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
-	    likely(cpu_active(dest_cpu)) && migrate_task(p, rq)) {
+	    likely(cpu_active(dest_cpu)) && need_migrate_task(p)) {
 		struct migration_arg arg = { p, dest_cpu };
 
 		task_rq_unlock(rq, &flags);

@@ -5911,7 +5913,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 		goto out;
 
 	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-	if (migrate_task(p, rq)) {
+	if (need_migrate_task(p)) {
 		struct migration_arg arg = { p, dest_cpu };
 		/* Need help from migration thread: drop lock and wait. */
 		__task_rq_unlock(rq);

@@ -1657,7 +1657,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
  * preempt must be disabled.
  */
 static int
-select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_flags)
+select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 {
 	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
 	int cpu = smp_processor_id();

@@ -7,7 +7,7 @@
 
 #ifdef CONFIG_SMP
 static int
-select_task_rq_idle(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
+select_task_rq_idle(struct task_struct *p, int sd_flag, int flags)
 {
 	return task_cpu(p); /* IDLE tasks as never migrated */
 }

@@ -977,13 +977,23 @@ static void yield_task_rt(struct rq *rq)
 static int find_lowest_rq(struct task_struct *task);
 
 static int
-select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
+select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 {
+	struct task_struct *curr;
+	struct rq *rq;
+	int cpu;
+
 	if (sd_flag != SD_BALANCE_WAKE)
 		return smp_processor_id();
 
+	cpu = task_cpu(p);
+	rq = cpu_rq(cpu);
+
+	rcu_read_lock();
+	curr = ACCESS_ONCE(rq->curr); /* unlocked access */
+
 	/*
-	 * If the current task is an RT task, then
+	 * If the current task on @p's runqueue is an RT task, then
 	 * try to see if we can wake this RT task up on another
 	 * runqueue. Otherwise simply start this RT task
 	 * on its current runqueue.

@@ -997,21 +1007,25 @@ select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
 	 * lock?
 	 *
 	 * For equal prio tasks, we just let the scheduler sort it out.
+	 *
+	 * Otherwise, just let it ride on the affined RQ and the
+	 * post-schedule router will push the preempted task away
+	 *
+	 * This test is optimistic, if we get it wrong the load-balancer
+	 * will have to sort it out.
 	 */
-	if (unlikely(rt_task(rq->curr)) &&
-	    (rq->curr->rt.nr_cpus_allowed < 2 ||
-	     rq->curr->prio < p->prio) &&
+	if (curr && unlikely(rt_task(curr)) &&
+	    (curr->rt.nr_cpus_allowed < 2 ||
+	     curr->prio < p->prio) &&
 	    (p->rt.nr_cpus_allowed > 1)) {
-		int cpu = find_lowest_rq(p);
+		int target = find_lowest_rq(p);
 
-		return (cpu == -1) ? task_cpu(p) : cpu;
+		if (target != -1)
+			cpu = target;
 	}
+	rcu_read_unlock();
 
-	/*
-	 * Otherwise, just let it ride on the affined RQ and the
-	 * post-schedule router will push the preempted task away
-	 */
-	return task_cpu(p);
+	return cpu;
 }
 
 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)

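The RT path can no longer assume it owns the lock of @p's runqueue, so it peeks at rq->curr under rcu_read_lock() instead: the RCU read-side section keeps the task_struct alive while its priority is examined, ACCESS_ONCE() forces a single load of the pointer, and, as the new comment says, a stale answer is acceptable because the load balancer cleans up after mistakes. The same unlocked-peek idiom, reduced to its core (illustrative only; cpu_runs_higher_prio_rt() is a made-up helper, not from the patch):

/* Illustrative idiom: inspect another runqueue's current task locklessly. */
static bool cpu_runs_higher_prio_rt(int cpu, struct task_struct *p)
{
	struct task_struct *curr;
	bool ret;

	rcu_read_lock();			 /* keeps *curr from being freed */
	curr = ACCESS_ONCE(cpu_rq(cpu)->curr);	 /* single, unlocked load */
	ret = curr && rt_task(curr) && curr->prio < p->prio;
	rcu_read_unlock();

	return ret;				 /* may already be stale: only a hint */
}
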
@@ -9,8 +9,7 @@
 
 #ifdef CONFIG_SMP
 static int
-select_task_rq_stop(struct rq *rq, struct task_struct *p,
-		    int sd_flag, int flags)
+select_task_rq_stop(struct task_struct *p, int sd_flag, int flags)
 {
 	return task_cpu(p); /* stop tasks as never migrate */
 }