diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 94115453c1c4cdccf65864a6755ba8fdbf8cc795..2b5e150e070b515bd75f9ea9ad382c539b8c4ebb 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1109,10 +1109,10 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 
 	p->sched_class->set_cpus_allowed(p, new_mask);
 
-	if (running)
-		p->sched_class->set_curr_task(rq);
 	if (queued)
 		enqueue_task(rq, p, ENQUEUE_RESTORE);
+	if (running)
+		p->sched_class->set_curr_task(rq);
 }
 
 /*
@@ -3707,10 +3707,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
 	p->prio = prio;
 
-	if (running)
-		p->sched_class->set_curr_task(rq);
 	if (queued)
 		enqueue_task(rq, p, queue_flag);
+	if (running)
+		p->sched_class->set_curr_task(rq);
 
 	check_class_changed(rq, p, prev_class, oldprio);
 out_unlock:
@@ -4263,8 +4263,6 @@ static int __sched_setscheduler(struct task_struct *p,
 	prev_class = p->sched_class;
 	__setscheduler(rq, p, attr, pi);
 
-	if (running)
-		p->sched_class->set_curr_task(rq);
 	if (queued) {
 		/*
 		 * We enqueue to tail when the priority of a task is
@@ -4275,6 +4273,8 @@ static int __sched_setscheduler(struct task_struct *p,
 
 		enqueue_task(rq, p, queue_flags);
 	}
+	if (running)
+		p->sched_class->set_curr_task(rq);
 
 	check_class_changed(rq, p, prev_class, oldprio);
 	preempt_disable(); /* avoid rq from going away on us */
@@ -5439,10 +5439,10 @@ void sched_setnuma(struct task_struct *p, int nid)
 
 	p->numa_preferred_nid = nid;
 
-	if (running)
-		p->sched_class->set_curr_task(rq);
 	if (queued)
 		enqueue_task(rq, p, ENQUEUE_RESTORE);
+	if (running)
+		p->sched_class->set_curr_task(rq);
 	task_rq_unlock(rq, p, &rf);
 }
 #endif /* CONFIG_NUMA_BALANCING */
@@ -7949,10 +7949,10 @@ void sched_move_task(struct task_struct *tsk)
 
 	sched_change_group(tsk, TASK_MOVE_GROUP);
 
-	if (unlikely(running))
-		tsk->sched_class->set_curr_task(rq);
 	if (queued)
 		enqueue_task(rq, tsk, ENQUEUE_RESTORE | ENQUEUE_MOVE);
+	if (unlikely(running))
+		tsk->sched_class->set_curr_task(rq);
 
 	task_rq_unlock(rq, tsk, &rf);
 }
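
As a rough illustration of the ordering this patch establishes (a userspace sketch, not kernel code; change_task_attr() and the stub helpers below are invented for this example): each call site dequeues the task and puts the previous task, changes the task's attributes, re-enqueues the task, and only then reinstalls it as the current task, so the "set current" step presumably runs against a task that is already back on the queue.

/* Hypothetical userspace model of the post-patch ordering. */
#include <stdbool.h>
#include <stdio.h>

struct task {
	int prio;
	bool queued;	/* on the run queue? */
	bool running;	/* currently executing? */
};

/* Stub helpers standing in for the scheduler operations. */
static void dequeue_task(struct task *t)  { t->queued = false; printf("dequeue\n"); }
static void put_prev_task(struct task *t) { (void)t; printf("put_prev\n"); }
static void enqueue_task(struct task *t)  { t->queued = true;  printf("enqueue\n"); }
static void set_curr_task(struct task *t)
{
	printf("set_curr (prio=%d, queued=%d)\n", t->prio, t->queued);
}

/* Hypothetical helper mirroring the sequence the patched call sites use. */
static void change_task_attr(struct task *t, int new_prio)
{
	bool queued = t->queued;
	bool running = t->running;

	if (queued)
		dequeue_task(t);
	if (running)
		put_prev_task(t);

	t->prio = new_prio;		/* the actual attribute change */

	if (queued)
		enqueue_task(t);	/* re-queue first ... */
	if (running)
		set_curr_task(t);	/* ... then reinstall as current */
}

int main(void)
{
	struct task t = { .prio = 120, .queued = true, .running = true };

	change_task_attr(&t, 100);
	return 0;
}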