Commit 50605ffb authored by Thomas Gleixner, committed by Ingo Molnar

sched/core: Provide a tsk_nr_cpus_allowed() helper

tsk_nr_cpus_allowed() is an accessor for task->nr_cpus_allowed that allows
us to change the representation of ->nr_cpus_allowed if required.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/1462969411-17735-2-git-send-email-bigeasy@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent ade42e09
@@ -1930,6 +1930,11 @@ extern int arch_task_struct_size __read_mostly;
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
 
+static inline int tsk_nr_cpus_allowed(struct task_struct *p)
+{
+	return p->nr_cpus_allowed;
+}
+
 #define TNF_MIGRATED	0x01
 #define TNF_NO_GROUP	0x02
 #define TNF_SHARED	0x04
...
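For illustration only (hypothetical, not part of this commit): because every converted call site now reads the count through tsk_nr_cpus_allowed(), a later patch could change how the value is represented by editing just the helper body, for example deriving the count from the allowed mask via the existing cpumask_weight() helper rather than the cached field:

/*
 * Hypothetical sketch, not from this commit: if the cached
 * ->nr_cpus_allowed field were ever dropped, only this helper body
 * would change; the call sites converted below keep working as-is.
 * Assumes <linux/sched.h> and <linux/cpumask.h> are in scope.
 */
static inline int tsk_nr_cpus_allowed(struct task_struct *p)
{
	/* cpumask_weight() counts the set bits in the allowed mask. */
	return cpumask_weight(tsk_cpus_allowed(p));
}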
@@ -1585,7 +1585,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
 {
 	lockdep_assert_held(&p->pi_lock);
 
-	if (p->nr_cpus_allowed > 1)
+	if (tsk_nr_cpus_allowed(p) > 1)
 		cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
 	else
 		cpu = cpumask_any(tsk_cpus_allowed(p));
...
@@ -134,7 +134,7 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 	struct task_struct *p = dl_task_of(dl_se);
 
-	if (p->nr_cpus_allowed > 1)
+	if (tsk_nr_cpus_allowed(p) > 1)
 		dl_rq->dl_nr_migratory++;
 
 	update_dl_migration(dl_rq);
@@ -144,7 +144,7 @@ static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 	struct task_struct *p = dl_task_of(dl_se);
 
-	if (p->nr_cpus_allowed > 1)
+	if (tsk_nr_cpus_allowed(p) > 1)
 		dl_rq->dl_nr_migratory--;
 
 	update_dl_migration(dl_rq);
@@ -966,7 +966,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 
 	enqueue_dl_entity(&p->dl, pi_se, flags);
 
-	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
+	if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
 		enqueue_pushable_dl_task(rq, p);
 }
@@ -1040,9 +1040,9 @@ select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
 	 * try to make it stay here, it might be important.
 	 */
 	if (unlikely(dl_task(curr)) &&
-	    (curr->nr_cpus_allowed < 2 ||
+	    (tsk_nr_cpus_allowed(curr) < 2 ||
 	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
-	    (p->nr_cpus_allowed > 1)) {
+	    (tsk_nr_cpus_allowed(p) > 1)) {
 		int target = find_later_rq(p);
 
 		if (target != -1 &&
@@ -1063,7 +1063,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
 	 * Current can't be migrated, useless to reschedule,
 	 * let's hope p can move out.
 	 */
-	if (rq->curr->nr_cpus_allowed == 1 ||
+	if (tsk_nr_cpus_allowed(rq->curr) == 1 ||
 	    cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
 		return;
@@ -1071,7 +1071,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
 	 * p is migratable, so let's not schedule it and
 	 * see if it is pushed or pulled somewhere else.
 	 */
-	if (p->nr_cpus_allowed != 1 &&
+	if (tsk_nr_cpus_allowed(p) != 1 &&
 	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
 		return;
@@ -1186,7 +1186,7 @@ static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
 {
 	update_curr_dl(rq);
 
-	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
+	if (on_dl_rq(&p->dl) && tsk_nr_cpus_allowed(p) > 1)
 		enqueue_pushable_dl_task(rq, p);
 }
@@ -1287,7 +1287,7 @@ static int find_later_rq(struct task_struct *task)
 	if (unlikely(!later_mask))
 		return -1;
 
-	if (task->nr_cpus_allowed == 1)
+	if (tsk_nr_cpus_allowed(task) == 1)
 		return -1;
 
 	/*
@@ -1433,7 +1433,7 @@ static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
 
 	BUG_ON(rq->cpu != task_cpu(p));
 	BUG_ON(task_current(rq, p));
-	BUG_ON(p->nr_cpus_allowed <= 1);
+	BUG_ON(tsk_nr_cpus_allowed(p) <= 1);
 
 	BUG_ON(!task_on_rq_queued(p));
 	BUG_ON(!dl_task(p));
@@ -1472,7 +1472,7 @@ static int push_dl_task(struct rq *rq)
 	 */
 	if (dl_task(rq->curr) &&
 	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
-	    rq->curr->nr_cpus_allowed > 1) {
+	    tsk_nr_cpus_allowed(rq->curr) > 1) {
 		resched_curr(rq);
 		return 0;
 	}
@@ -1619,9 +1619,9 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p)
 {
 	if (!task_running(rq, p) &&
 	    !test_tsk_need_resched(rq->curr) &&
-	    p->nr_cpus_allowed > 1 &&
+	    tsk_nr_cpus_allowed(p) > 1 &&
 	    dl_task(rq->curr) &&
-	    (rq->curr->nr_cpus_allowed < 2 ||
+	    (tsk_nr_cpus_allowed(rq->curr) < 2 ||
 	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
 		push_dl_tasks(rq);
 	}
@@ -1725,7 +1725,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
 
 	if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
-		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
+		if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded)
 			queue_push_tasks(rq);
 #else
 		if (dl_task(rq->curr))
...
@@ -334,7 +334,7 @@ static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 
 	rt_rq->rt_nr_total++;
-	if (p->nr_cpus_allowed > 1)
+	if (tsk_nr_cpus_allowed(p) > 1)
 		rt_rq->rt_nr_migratory++;
 
 	update_rt_migration(rt_rq);
@@ -351,7 +351,7 @@ static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 
 	rt_rq->rt_nr_total--;
-	if (p->nr_cpus_allowed > 1)
+	if (tsk_nr_cpus_allowed(p) > 1)
 		rt_rq->rt_nr_migratory--;
 
 	update_rt_migration(rt_rq);
@@ -1324,7 +1324,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 
 	enqueue_rt_entity(rt_se, flags);
 
-	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
+	if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
 		enqueue_pushable_task(rq, p);
 }
@@ -1413,7 +1413,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 	 * will have to sort it out.
 	 */
 	if (curr && unlikely(rt_task(curr)) &&
-	    (curr->nr_cpus_allowed < 2 ||
+	    (tsk_nr_cpus_allowed(curr) < 2 ||
 	     curr->prio <= p->prio)) {
 		int target = find_lowest_rq(p);
 
@@ -1437,7 +1437,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 	 * Current can't be migrated, useless to reschedule,
 	 * let's hope p can move out.
 	 */
-	if (rq->curr->nr_cpus_allowed == 1 ||
+	if (tsk_nr_cpus_allowed(rq->curr) == 1 ||
 	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
 		return;
@@ -1445,7 +1445,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 	 * p is migratable, so let's not schedule it and
 	 * see if it is pushed or pulled somewhere else.
 	 */
-	if (p->nr_cpus_allowed != 1
+	if (tsk_nr_cpus_allowed(p) != 1
 	    && cpupri_find(&rq->rd->cpupri, p, NULL))
 		return;
@@ -1579,7 +1579,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 	 * The previous task needs to be made eligible for pushing
 	 * if it is still active
 	 */
-	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
+	if (on_rt_rq(&p->rt) && tsk_nr_cpus_allowed(p) > 1)
 		enqueue_pushable_task(rq, p);
 }
@@ -1629,7 +1629,7 @@ static int find_lowest_rq(struct task_struct *task)
 	if (unlikely(!lowest_mask))
 		return -1;
 
-	if (task->nr_cpus_allowed == 1)
+	if (tsk_nr_cpus_allowed(task) == 1)
 		return -1; /* No other targets possible */
 
 	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
@@ -1762,7 +1762,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
 
 	BUG_ON(rq->cpu != task_cpu(p));
 	BUG_ON(task_current(rq, p));
-	BUG_ON(p->nr_cpus_allowed <= 1);
+	BUG_ON(tsk_nr_cpus_allowed(p) <= 1);
 
 	BUG_ON(!task_on_rq_queued(p));
 	BUG_ON(!rt_task(p));
@@ -2122,9 +2122,9 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
 {
 	if (!task_running(rq, p) &&
 	    !test_tsk_need_resched(rq->curr) &&
-	    p->nr_cpus_allowed > 1 &&
+	    tsk_nr_cpus_allowed(p) > 1 &&
 	    (dl_task(rq->curr) || rt_task(rq->curr)) &&
-	    (rq->curr->nr_cpus_allowed < 2 ||
+	    (tsk_nr_cpus_allowed(rq->curr) < 2 ||
 	     rq->curr->prio <= p->prio))
 		push_rt_tasks(rq);
 }
@@ -2197,7 +2197,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 	 */
 	if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
-		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
+		if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded)
 			queue_push_tasks(rq);
 #else
 		if (p->prio < rq->curr->prio)
...