Commit 7ab85d4a authored by Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Thomas Gleixner:
 "Three small fixes in the scheduler/core:

   - use after free in the numa code
   - crash in the numa init code
   - a simple spelling fix"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  pid: Fix spelling in comments
  sched/numa: Fix use-after-free bug in the task_numa_compare
  sched: Fix crash in sched_init_numa()

@@ -588,7 +588,7 @@ void __init pidhash_init(void)
 
 void __init pidmap_init(void)
 {
-	/* Veryify no one has done anything silly */
+	/* Verify no one has done anything silly: */
 	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_HASH_ADDING);
 
 	/* bump default and minimum pid_max based on number of cpus */
@@ -6840,7 +6840,7 @@ static void sched_init_numa(void)
 
 			sched_domains_numa_masks[i][j] = mask;
 
-			for (k = 0; k < nr_node_ids; k++) {
+			for_each_node(k) {
 				if (node_distance(j, k) > sched_domains_numa_distance[i])
 					continue;
 
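The sched_init_numa() fix above replaces a dense 0..nr_node_ids loop with for_each_node(), which visits only node IDs present in the kernel's possible-node map, so sparse node numbering no longer sends the inner loop into IDs that have no backing data. Below is a minimal userspace sketch of that difference; NR_NODES, node_possible[] and fake_node_distance() are illustrative stand-ins for nr_node_ids, node_possible_map and node_distance(), not the kernel implementation:

/*
 * Minimal userspace sketch (not kernel code): iterating a sparse node map
 * by dense index visits IDs that do not exist; iterating the possible map
 * does not.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_NODES 16			/* stand-in for nr_node_ids: highest possible id + 1 */

static bool node_possible[NR_NODES] = {
	[0] = true, [8] = true,		/* sparse numbering: only nodes 0 and 8 exist */
};

/* Illustrative distance table, only meaningful for nodes that exist. */
static int fake_node_distance(int from, int to)
{
	return (from == to) ? 10 : 20;
}

int main(void)
{
	int k;

	/* Dense-index loop: also visits ids 1..7 and 9..15, which have no node data. */
	for (k = 0; k < NR_NODES; k++)
		printf("dense loop visits node %d (%s)\n",
		       k, node_possible[k] ? "exists" : "MISSING");

	/* for_each_node()-style loop: only ids present in the possible map. */
	for (k = 0; k < NR_NODES; k++) {
		if (!node_possible[k])
			continue;
		printf("possible-map loop visits node %d, distance to 0 = %d\n",
		       k, fake_node_distance(0, k));
	}
	return 0;
}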
@@ -1220,8 +1220,6 @@ static void task_numa_assign(struct task_numa_env *env,
 {
 	if (env->best_task)
 		put_task_struct(env->best_task);
-	if (p)
-		get_task_struct(p);
 
 	env->best_task = p;
 	env->best_imp = imp;
@@ -1289,20 +1287,30 @@ static void task_numa_compare(struct task_numa_env *env,
 	long imp = env->p->numa_group ? groupimp : taskimp;
 	long moveimp = imp;
 	int dist = env->dist;
+	bool assigned = false;
 
 	rcu_read_lock();
 
 	raw_spin_lock_irq(&dst_rq->lock);
 	cur = dst_rq->curr;
 	/*
-	 * No need to move the exiting task, and this ensures that ->curr
-	 * wasn't reaped and thus get_task_struct() in task_numa_assign()
-	 * is safe under RCU read lock.
-	 * Note that rcu_read_lock() itself can't protect from the final
-	 * put_task_struct() after the last schedule().
+	 * No need to move the exiting task or idle task.
 	 */
 	if ((cur->flags & PF_EXITING) || is_idle_task(cur))
 		cur = NULL;
+	else {
+		/*
+		 * The task_struct must be protected here to protect the
+		 * p->numa_faults access in the task_weight since the
+		 * numa_faults could already be freed in the following path:
+		 * finish_task_switch()
+		 *     --> put_task_struct()
+		 *         --> __put_task_struct()
+		 *             --> task_numa_free()
+		 */
+		get_task_struct(cur);
+	}
+
 	raw_spin_unlock_irq(&dst_rq->lock);
 
 	/*
@@ -1386,6 +1394,7 @@ static void task_numa_compare(struct task_numa_env *env,
 	 */
 	if (!load_too_imbalanced(src_load, dst_load, env)) {
 		imp = moveimp - 1;
+		put_task_struct(cur);
 		cur = NULL;
 		goto assign;
 	}
@@ -1411,9 +1420,16 @@ static void task_numa_compare(struct task_numa_env *env,
 		env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);
 
 assign:
+	assigned = true;
 	task_numa_assign(env, cur, imp);
 unlock:
 	rcu_read_unlock();
+	/*
+	 * The dst_rq->curr isn't assigned. The protection for task_struct is
+	 * finished.
+	 */
+	if (cur && !assigned)
+		put_task_struct(cur);
 }
 
 static void task_numa_find_cpu(struct task_numa_env *env,
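The use-after-free fix above pins dst_rq->curr with get_task_struct() while the runqueue lock is held, lets task_numa_assign() inherit that reference when the task becomes best_task, and drops it with put_task_struct() on every path where it is not handed off. Below is a minimal userspace sketch of that acquire-then-hand-off-or-release pattern; struct task, get_task(), put_task(), struct env, assign() and compare() are illustrative stand-ins, not the kernel API:

/*
 * Minimal userspace sketch (not kernel code) of the refcount pattern:
 * take a reference while inspecting a task, then either hand it off to
 * the new owner or drop it before returning.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct task {
	int refcount;
	const char *name;
};

static void get_task(struct task *t)
{
	t->refcount++;
}

static void put_task(struct task *t)
{
	if (--t->refcount == 0) {
		printf("freeing %s\n", t->name);
		free(t);
	}
}

struct env {
	struct task *best_task;		/* owns one reference, like env->best_task */
};

/* Like task_numa_assign() after the fix: consumes the caller's reference. */
static void assign(struct env *env, struct task *t)
{
	if (env->best_task)
		put_task(env->best_task);
	env->best_task = t;
}

static void compare(struct env *env, struct task *cur, bool better)
{
	bool assigned = false;

	get_task(cur);			/* pin cur while we inspect it, as if under the lock */

	if (better) {
		assign(env, cur);	/* reference handed off to env */
		assigned = true;
	}

	if (!assigned)
		put_task(cur);		/* not handed off: drop our reference */
}

int main(void)
{
	struct env env = { .best_task = NULL };
	struct task *t = malloc(sizeof(*t));

	t->refcount = 1;		/* the original owner's reference */
	t->name = "cur";

	compare(&env, t, true);		/* env now holds a second reference */
	put_task(t);			/* original owner drops its reference */
	put_task(env.best_task);	/* env drops its reference: task is freed exactly once */
	return 0;
}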