Commit 874cd339 authored by Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Thomas Gleixner:

 - two patches addressing the problem that the scheduler, under certain
   conditions, allows user space tasks to be scheduled on CPUs which are
   not yet fully booted, which causes a few subtle and hard to debug
   issues (the online/active distinction is sketched after the shortlog)

 - add a missing runqueue clock update in the deadline scheduler, the
   lack of which triggers a warning under certain circumstances

 - fix a silly typo in the scheduler header file

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/headers: Fix typo
  sched/deadline: Fix missing clock update
  sched/core: Require cpu_active() in select_task_rq(), for user tasks
  sched/core: Fix rules for running on online && !active CPUs
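
The crux of the first two fixes is the window during CPU hotplug when a
CPU is already cpu_online() but not yet cpu_active(): only strictly
per-CPU kthreads (the ones doing the bringup) may run there; everything
else must wait for cpu_active(). Below is a minimal userspace sketch of
that rule, mirroring the is_cpu_allowed()/is_per_cpu_kthread() helpers
added in this merge; the bitmask harness and struct task are
illustrative stand-ins for kernel state, not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for kernel cpumasks: one bit per CPU. */
static unsigned int online_mask;	/* models cpu_online_mask */
static unsigned int active_mask;	/* models cpu_active_mask */

struct task {
	bool kthread;		/* models PF_KTHREAD */
	int nr_cpus_allowed;	/* weight of p->cpus_allowed */
	unsigned int cpus_allowed;
};

static bool is_per_cpu_kthread(const struct task *p)
{
	return p->kthread && p->nr_cpus_allowed == 1;
}

/* Mirrors the logic of the is_cpu_allowed() helper added below. */
static bool is_cpu_allowed(const struct task *p, int cpu)
{
	if (!(p->cpus_allowed & (1u << cpu)))
		return false;
	if (is_per_cpu_kthread(p))
		return online_mask & (1u << cpu);	/* cpu_online() */
	return active_mask & (1u << cpu);		/* cpu_active() */
}

int main(void)
{
	struct task user = { false, 2, 0x3 };	/* ordinary user task */
	struct task bringup = { true, 1, 0x2 };	/* per-CPU kthread for CPU1 */

	online_mask = 0x3;	/* CPU0 and CPU1 online */
	active_mask = 0x1;	/* only CPU0 active: CPU1 is mid-bringup */

	printf("user task on CPU1:       %d\n", is_cpu_allowed(&user, 1));    /* 0 */
	printf("per-CPU kthread on CPU1: %d\n", is_cpu_allowed(&bringup, 1)); /* 1 */
	return 0;
}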
@@ -881,6 +881,33 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 }
 
 #ifdef CONFIG_SMP
+
+static inline bool is_per_cpu_kthread(struct task_struct *p)
+{
+	if (!(p->flags & PF_KTHREAD))
+		return false;
+
+	if (p->nr_cpus_allowed != 1)
+		return false;
+
+	return true;
+}
+
+/*
+ * Per-CPU kthreads are allowed to run on !active && online CPUs, see
+ * __set_cpus_allowed_ptr() and select_fallback_rq().
+ */
+static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
+{
+	if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+		return false;
+
+	if (is_per_cpu_kthread(p))
+		return cpu_online(cpu);
+
+	return cpu_active(cpu);
+}
+
 /*
  * This is how migration works:
  *
@@ -938,16 +965,8 @@ struct migration_arg {
 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
 				 struct task_struct *p, int dest_cpu)
 {
-	if (p->flags & PF_KTHREAD) {
-		if (unlikely(!cpu_online(dest_cpu)))
-			return rq;
-	} else {
-		if (unlikely(!cpu_active(dest_cpu)))
-			return rq;
-	}
-
 	/* Affinity changed (again). */
-	if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+	if (!is_cpu_allowed(p, dest_cpu))
 		return rq;
 
 	update_rq_clock(rq);
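
Note the behavioral tightening hidden in this hunk: the old code let any
PF_KTHREAD task onto an online-but-not-active CPU, while is_cpu_allowed()
grants that exception only to strictly per-CPU kthreads. A self-contained
sketch of the difference for an unpinned kthread (a kworker-like task);
the masks and types are again illustrative stand-ins, not kernel API.

#include <stdbool.h>
#include <stdio.h>

static const unsigned int online = 0x3;	/* CPU0, CPU1 online */
static const unsigned int active = 0x1;	/* only CPU0 active  */

struct task { bool kthread; int nr_cpus_allowed; unsigned int mask; };

/* The old __migrate_task() gate: any kthread only needed cpu_online(). */
static bool old_gate(const struct task *p, int cpu)
{
	unsigned int bit = 1u << cpu;

	if (!(p->mask & bit))
		return false;
	return p->kthread ? (online & bit) : (active & bit);
}

/* The new gate: the online exception is limited to per-CPU kthreads. */
static bool new_gate(const struct task *p, int cpu)
{
	unsigned int bit = 1u << cpu;

	if (!(p->mask & bit))
		return false;
	if (p->kthread && p->nr_cpus_allowed == 1)
		return online & bit;
	return active & bit;
}

int main(void)
{
	/* A kworker-style kthread that may run on either CPU. */
	struct task kw = { true, 2, 0x3 };

	printf("old gate, CPU1: %d\n", old_gate(&kw, 1));	/* 1 */
	printf("new gate, CPU1: %d\n", new_gate(&kw, 1));	/* 0 */
	return 0;
}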
@@ -1476,10 +1495,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 	for (;;) {
 		/* Any allowed, online CPU? */
 		for_each_cpu(dest_cpu, &p->cpus_allowed) {
-			if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
-				continue;
-			if (!cpu_online(dest_cpu))
+			if (!is_cpu_allowed(p, dest_cpu))
 				continue;
+
 			goto out;
 		}
@@ -1542,8 +1560,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
 	 * [ this allows ->select_task() to simply return task_cpu(p) and
 	 *   not worry about this generic constraint ]
 	 */
-	if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
-		     !cpu_online(cpu)))
+	if (unlikely(!is_cpu_allowed(p, cpu)))
 		cpu = select_fallback_rq(task_cpu(p), p);
 
 	return cpu;
...
@@ -1259,6 +1259,9 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
 	rq = task_rq_lock(p, &rf);
 
+	sched_clock_tick();
+	update_rq_clock(rq);
+
 	if (!dl_task(p) || p->state == TASK_DEAD) {
 		struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
@@ -1278,9 +1281,6 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
 	if (dl_se->dl_non_contending == 0)
 		goto unlock;
 
-	sched_clock_tick();
-	update_rq_clock(rq);
-
 	sub_running_bw(dl_se, &rq->dl);
 	dl_se->dl_non_contending = 0;
 unlock:
...
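
The deadline change works because the runqueue clock must be refreshed
(sched_clock_tick() followed by update_rq_clock()) before anything
samples it: the early-exit branch in inactive_task_timer() reaches
bandwidth accounting, which can read the clock, before the update that
used to sit further down. Here is a toy model of that discipline; the
warning stands in for the kernel's assert_clock_updated() check, and all
names below are illustrative rather than kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Toy runqueue: the clock may only be read after an update within the
 * same lock section, mimicking assert_clock_updated(). */
struct rq {
	unsigned long long clock;
	bool clock_updated;	/* models RQCF_UPDATED */
};

static void rq_lock(struct rq *rq)
{
	rq->clock_updated = false;	/* updates don't carry across locks */
}

static void update_rq_clock(struct rq *rq)
{
	rq->clock++;			/* stand-in for reading sched_clock */
	rq->clock_updated = true;
}

static unsigned long long rq_clock(struct rq *rq)
{
	if (!rq->clock_updated)
		fprintf(stderr, "WARN: rq clock read without update\n");
	return rq->clock;
}

static void sub_running_bw(struct rq *rq)
{
	(void)rq_clock(rq);	/* bandwidth accounting samples the clock */
}

int main(void)
{
	struct rq rq = { 0, false };

	rq_lock(&rq);		/* buggy order: accounting before update */
	sub_running_bw(&rq);	/* warns */

	rq_lock(&rq);		/* fixed order, as in this merge */
	update_rq_clock(&rq);
	sub_running_bw(&rq);	/* clean */
	return 0;
}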
@@ -983,7 +983,7 @@ static inline void rq_clock_skip_update(struct rq *rq)
 }
 
 /*
- * See rt task throttoling, which is the only time a skip
+ * See rt task throttling, which is the only time a skip
  * request is cancelled.
  */
 static inline void rq_clock_cancel_skipupdate(struct rq *rq)
...