From f253eed25b35f3332e14ec2803271fa8042dbc20 Mon Sep 17 00:00:00 2001
From: Zhou Chengming
Date: Mon, 11 Feb 2019 16:23:00 +0800
Subject: [PATCH] sched/rt.c: pick and check task if double_lock_balance()
 unlocks the rq

euler inclusion
category: bugfix
bugzilla: 2535
CVE: N/A

-------------------------------------------------

push_rt_task() picks the first pushable task and finds an eligible
lowest_rq, then calls double_lock_balance(rq, lowest_rq). So if
double_lock_balance() unlocks the rq (i.e. when it returns 1), we have
to check whether this task is still on the rq.

The problem is that the check conditions are not sufficient:

	if (unlikely(task_rq(task) != rq ||
		     !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_allowed) ||
		     task_running(rq, task) ||
		     !rt_task(task) ||
		     !task_on_rq_queued(task))) {

cpu2				cpu1			cpu0
push_rt_task(rq1)
  pick task_A on rq1
  find rq0
    double_lock_balance(rq1, rq0)
      unlock(rq1)
				rq1 __schedule
				  pick task_A run
				task_A sleep (dequeued)
      lock(rq0)
      lock(rq1)
    do_above_check(task_A)
      task_rq(task_A) == rq1
      cpus_allowed unchanged
      task_running == false
      rt_task(task_A) == true
						try_to_wake_up(task_A)
						  select_cpu = cpu3
						  enqueue(rq3, task_A)
						  task_A->on_rq = 1
      task_on_rq_queued(task_A)
    above_check passed, return rq0
    ...
    migrate task_A from rq1 to rq0

So we can't rely on these checks of task_A to make sure task_A is
still on rq1, even though we hold rq1->lock. This patch repicks the
first pushable task to make sure it is still on the rq.

Signed-off-by: Zhou Chengming
Signed-off-by: Kefeng Wang
Signed-off-by: Hui Wang
Signed-off-by: Zhang Xiaoxu

Conflicts:
	kernel/sched/rt.c

Signed-off-by: Xuefeng Wang
Reviewed-by: Xie XiuQi
Signed-off-by: Yang Yingliang
---
 kernel/sched/rt.c | 50 +++++++++++++++++++++++++-------------------------
 1 file changed, 25 insertions(+), 25 deletions(-)

diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 2e2955a8cf8f..1a8627189302 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1714,6 +1714,26 @@ static int find_lowest_rq(struct task_struct *task)
 	return -1;
 }
 
+static struct task_struct *pick_next_pushable_task(struct rq *rq)
+{
+	struct task_struct *p;
+
+	if (!has_pushable_tasks(rq))
+		return NULL;
+
+	p = plist_first_entry(&rq->rt.pushable_tasks,
+			      struct task_struct, pushable_tasks);
+
+	BUG_ON(rq->cpu != task_cpu(p));
+	BUG_ON(task_current(rq, p));
+	BUG_ON(p->nr_cpus_allowed <= 1);
+
+	BUG_ON(!p->on_rq);
+	BUG_ON(!rt_task(p));
+
+	return p;
+}
+
 /* Will lock the rq it finds */
 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 {
@@ -1747,11 +1767,11 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 			 * migrated already or had its affinity changed.
 			 * Also make sure that it wasn't scheduled on its rq.
 			 */
-			if (unlikely(task_rq(task) != rq ||
-				     !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_allowed) ||
-				     task_running(rq, task) ||
-				     !rt_task(task) ||
-				     !task_on_rq_queued(task))) {
+
+			struct task_struct *next_task = pick_next_pushable_task(rq);
+			if (unlikely(next_task != task ||
+				     !rt_task(task) ||
+				     !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_allowed))) {
 
 				double_unlock_balance(rq, lowest_rq);
 				lowest_rq = NULL;
@@ -1771,26 +1791,6 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 	return lowest_rq;
 }
 
-static struct task_struct *pick_next_pushable_task(struct rq *rq)
-{
-	struct task_struct *p;
-
-	if (!has_pushable_tasks(rq))
-		return NULL;
-
-	p = plist_first_entry(&rq->rt.pushable_tasks,
-			      struct task_struct, pushable_tasks);
-
-	BUG_ON(rq->cpu != task_cpu(p));
-	BUG_ON(task_current(rq, p));
-	BUG_ON(p->nr_cpus_allowed <= 1);
-
-	BUG_ON(!task_on_rq_queued(p));
-	BUG_ON(!rt_task(p));
-
-	return p;
-}
-
 /*
  * If the current CPU has more than one RT task, see if the non
  * running task can migrate over to a CPU that is running a task
--
GitLab
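
The idea behind the fix generalizes to any drop-lock/relock pattern: once
the lock protecting a pick is released, the pick must be revalidated by
repicking from the data structure, not by probing fields of the saved
pointer, because the same field values can reappear by coincidence (as in
the cpu3 wakeup above). Below is a minimal userspace sketch of that
pattern; the names (struct queue, migrate_first(), lock_pair()) are
invented for illustration and this is not kernel code. The real patch
additionally rechecks rt_task() and the CPU affinity after repicking,
since those attributes can change even when the task pointer is unchanged.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct task {
	struct task *next;
	int prio;
};

struct queue {
	pthread_mutex_t lock;
	struct task *head;		/* first pushable element */
};

/* Caller must hold q->lock. */
static struct task *queue_first(struct queue *q)
{
	return q->head;
}

/* Take both locks in a fixed global order to avoid deadlock. */
static void lock_pair(struct queue *a, struct queue *b)
{
	if ((uintptr_t)a < (uintptr_t)b) {
		pthread_mutex_lock(&a->lock);
		pthread_mutex_lock(&b->lock);
	} else {
		pthread_mutex_lock(&b->lock);
		pthread_mutex_lock(&a->lock);
	}
}

static bool migrate_first(struct queue *src, struct queue *dst)
{
	struct task *task;
	bool moved = false;

	pthread_mutex_lock(&src->lock);
	task = queue_first(src);
	if (!task) {
		pthread_mutex_unlock(&src->lock);
		return false;
	}

	/*
	 * Equivalent of double_lock_balance() returning 1: we must drop
	 * src->lock to respect the lock order, opening a window in which
	 * another thread may dequeue 'task' and re-enqueue it elsewhere.
	 */
	pthread_mutex_unlock(&src->lock);
	lock_pair(src, dst);

	/*
	 * The patch's fix, in miniature: repick the head and compare it
	 * with the element chosen before the window, instead of probing
	 * fields of the possibly stale 'task' pointer.
	 */
	if (queue_first(src) == task) {
		src->head = task->next;
		task->next = dst->head;
		dst->head = task;
		moved = true;
	}

	pthread_mutex_unlock(&src->lock);
	pthread_mutex_unlock(&dst->lock);
	return moved;
}

int main(void)
{
	struct task t = { .next = NULL, .prio = 10 };
	struct queue src = { PTHREAD_MUTEX_INITIALIZER, &t };
	struct queue dst = { PTHREAD_MUTEX_INITIALIZER, NULL };

	printf("migrated: %d\n", migrate_first(&src, &dst));
	return 0;
}

The sketch sidesteps object lifetime: its nodes are caller-owned, whereas
in the kernel the unlocked window also relies on the task not being freed,
so pointer equality is only meaningful under such a guarantee.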