diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f12225f26b70a630ac185cfccba2b140f191c47e..091e089063be1dc25ab1180fec3f99a4408fb024 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5851,11 +5851,14 @@ void __init sched_init_smp(void)
 	/*
 	 * There's no userspace yet to cause hotplug operations; hence all the
 	 * CPU masks are stable and all blatant races in the below code cannot
-	 * happen.
+	 * happen. The hotplug lock is nevertheless taken to satisfy lockdep,
+	 * but there won't be any contention on it.
 	 */
+	cpus_read_lock();
 	mutex_lock(&sched_domains_mutex);
 	sched_init_domains(cpu_active_mask);
 	mutex_unlock(&sched_domains_mutex);
+	cpus_read_unlock();
 
 	/* Move init over to a non-isolated CPU */
 	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ee271bb661cc923dfa67ae5d5c45a18c71df7cb1..3648d0300fdf98b45702a42d0d672d38c97c3be7 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2400,8 +2400,8 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
 	local = 1;
 
 	/*
-	 * Retry task to preferred node migration periodically, in case it
-	 * case it previously failed, or the scheduler moved us.
+	 * Retry to migrate task to preferred node periodically, in case it
+	 * previously failed, or the scheduler moved us.
 	 */
 	if (time_after(jiffies, p->numa_migrate_retry)) {
 		task_numa_placement(p);
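
For readers outside the kernel tree, below is a minimal userspace sketch of the lock ordering the core.c hunk establishes: the hotplug lock's read side is taken before sched_domains_mutex, purely so a lockdep-style checker records the expected acquisition order, while no contention is possible at that point in boot. The pthread primitives and every name in the sketch are stand-ins chosen for illustration, not kernel APIs.

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for cpus_read_lock()/cpus_read_unlock() and sched_domains_mutex. */
static pthread_rwlock_t hotplug_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t domains_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for sched_init_domains(cpu_active_mask). */
static void init_domains(void)
{
	printf("building scheduling domains\n");
}

int main(void)
{
	/*
	 * No other threads exist yet, so the read lock cannot be contended;
	 * it is taken only so a lock-order checker records
	 * hotplug_lock -> domains_mutex, mirroring the patched
	 * sched_init_smp().
	 */
	pthread_rwlock_rdlock(&hotplug_lock);
	pthread_mutex_lock(&domains_mutex);
	init_domains();
	pthread_mutex_unlock(&domains_mutex);
	pthread_rwlock_unlock(&hotplug_lock);
	return 0;
}

Built as an ordinary pthread program (cc sketch.c -lpthread), this walks through the same acquire/release order the patch introduces.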