Commit fa17b507 authored by Peter Zijlstra, committed by Ingo Molnar

sched: Wrap scheduler p->cpus_allowed access

This task is preparatory for the migrate_disable() implementation, but
stands on its own and provides a cleanup.

It currently only converts those sites required for task-placement.
Kosaki-san once mentioned replacing cpus_allowed with a proper
cpumask_t instead of the NR_CPUS-sized array it currently is; that
would also require something like this.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Link: http://lkml.kernel.org/n/tip-e42skvaddos99psip0vce41o@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent 6eb57e0d
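The hunk that introduces the tsk_cpus_allowed() wrapper itself is not part of this excerpt; only the call-site conversions are shown below. As a minimal sketch (an assumption, not taken from this diff), the accessor would amount to handing back a pointer to the existing mask, so every converted site stays equivalent to the old open-coded &p->cpus_allowed:

/*
 * Sketch only (assumed shape, not shown in this excerpt): a trivial
 * accessor gives later work such as migrate_disable() a single place
 * to change how a task's allowed-CPU mask is obtained, without
 * touching every call site again.
 */
#define tsk_cpus_allowed(tsk)	(&(tsk)->cpus_allowed)

With a wrapper of that shape, cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) behaves exactly like the cpumask_test_cpu(cpu, &p->cpus_allowed) it replaces in the hunks below.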
@@ -2544,11 +2544,11 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 	/* Look for allowed, online CPU in same node. */
 	for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
-		if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+		if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
 			return dest_cpu;
 	/* Any allowed, online CPU? */
-	dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
+	dest_cpu = cpumask_any_and(tsk_cpus_allowed(p), cpu_active_mask);
 	if (dest_cpu < nr_cpu_ids)
 		return dest_cpu;
@@ -2585,7 +2585,7 @@ int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
 	 * [ this allows ->select_task() to simply return task_cpu(p) and
 	 *   not worry about this generic constraint ]
 	 */
-	if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
+	if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
		     !cpu_online(cpu)))
 		cpu = select_fallback_rq(task_cpu(p), p);
@@ -6262,7 +6262,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	if (task_cpu(p) != src_cpu)
 		goto done;
 	/* Affinity changed (again). */
-	if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
 		goto fail;
 	/*
...
@@ -2183,7 +2183,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 		/* Skip over this group if it has no CPUs allowed */
 		if (!cpumask_intersects(sched_group_cpus(group),
-					&p->cpus_allowed))
+					tsk_cpus_allowed(p)))
 			continue;
 		local_group = cpumask_test_cpu(this_cpu,
@@ -2229,7 +2229,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 	int i;
 	/* Traverse only the allowed CPUs */
-	for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
+	for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
 		load = weighted_cpuload(i);
 		if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -2273,7 +2273,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
 		if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
 			break;
-		for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
+		for_each_cpu_and(i, sched_domain_span(sd), tsk_cpus_allowed(p)) {
 			if (idle_cpu(i)) {
 				target = i;
 				break;
@@ -2316,7 +2316,7 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 	int sync = wake_flags & WF_SYNC;
 	if (sd_flag & SD_BALANCE_WAKE) {
-		if (cpumask_test_cpu(cpu, &p->cpus_allowed))
+		if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
 			want_affine = 1;
 		new_cpu = prev_cpu;
 	}
@@ -2697,7 +2697,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
 	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
 	 * 3) are cache-hot on their current CPU.
 	 */
-	if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
+	if (!cpumask_test_cpu(this_cpu, tsk_cpus_allowed(p))) {
 		schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
 		return 0;
 	}
@@ -4087,7 +4087,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 			 * moved to this_cpu
 			 */
 			if (!cpumask_test_cpu(this_cpu,
-					      &busiest->curr->cpus_allowed)) {
+					      tsk_cpus_allowed(busiest->curr))) {
 				raw_spin_unlock_irqrestore(&busiest->lock,
							    flags);
 				all_pinned = 1;
...
@@ -1179,7 +1179,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
-	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
+	    (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
 	    (p->rt.nr_cpus_allowed > 1))
 		return 1;
 	return 0;
@@ -1324,7 +1324,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 	 */
 	if (unlikely(task_rq(task) != rq ||
		     !cpumask_test_cpu(lowest_rq->cpu,
-				       &task->cpus_allowed) ||
+				       tsk_cpus_allowed(task)) ||
		     task_running(rq, task) ||
		     !task->on_rq)) {
...
@@ -22,7 +22,7 @@ notrace unsigned int debug_smp_processor_id(void)
 	 * Kernel threads bound to a single CPU can safely use
 	 * smp_processor_id():
 	 */
-	if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
+	if (cpumask_equal(tsk_cpus_allowed(current), cpumask_of(this_cpu)))
 		goto out;
 	/*
...