From e26fbffd32c28107d9d268b432706ccf84fb6411 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Thu, 10 Mar 2016 12:54:10 +0100
Subject: [PATCH] sched: Allow hotplug notifiers to be setup early

Prevent the SMP scheduler related notifiers from being executed before the
SMP scheduler is initialized and install them early.

This is a preparatory change for further consolidation of the hotplug
notifier maze.

Signed-off-by: Thomas Gleixner
Acked-by: Peter Zijlstra
Cc: rt@linutronix.de
Signed-off-by: Thomas Gleixner
---
 kernel/sched/core.c | 59 +++++++++++++++++++++++++++------------------
 1 file changed, 36 insertions(+), 23 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4df9aaae27a2..328502c9af00 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5195,6 +5195,8 @@ int task_can_attach(struct task_struct *p,
 
 #ifdef CONFIG_SMP
 
+static bool sched_smp_initialized __read_mostly;
+
 #ifdef CONFIG_NUMA_BALANCING
 /* Migrate current task p to target_cpu */
 int migrate_task_to(struct task_struct *p, int target_cpu)
@@ -5513,25 +5515,6 @@ int sched_cpu_starting(unsigned int cpu)
 	return 0;
 }
 
-static int __init migration_init(void)
-{
-	void *cpu = (void *)(long)smp_processor_id();
-	int err;
-
-	/* Initialize migration for the boot CPU */
-	err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
-	BUG_ON(err == NOTIFY_BAD);
-	migration_call(&migration_notifier, CPU_ONLINE, cpu);
-	register_cpu_notifier(&migration_notifier);
-
-	/* Register cpu active notifiers */
-	cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
-	cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
-
-	return 0;
-}
-early_initcall(migration_init);
-
 static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
 
 #ifdef CONFIG_SCHED_DEBUG
@@ -6711,6 +6694,9 @@ static int sched_domains_numa_masks_update(struct notifier_block *nfb,
 {
 	int cpu = (long)hcpu;
 
+	if (!sched_smp_initialized)
+		return NOTIFY_DONE;
+
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_ONLINE:
 		sched_domains_numa_masks_set(cpu);
@@ -7129,6 +7115,9 @@ static int num_cpus_frozen;	/* used to mark begin/end of suspend/resume */
 static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
 			     void *hcpu)
 {
+	if (!sched_smp_initialized)
+		return NOTIFY_DONE;
+
 	switch (action) {
 	case CPU_ONLINE_FROZEN:
 	case CPU_DOWN_FAILED_FROZEN:
@@ -7169,6 +7158,9 @@ static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
 	bool overflow;
 	int cpus;
 
+	if (!sched_smp_initialized)
+		return NOTIFY_DONE;
+
 	switch (action) {
 	case CPU_DOWN_PREPARE:
 		rcu_read_lock_sched();
@@ -7216,10 +7208,6 @@ void __init sched_init_smp(void)
 	cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
 	mutex_unlock(&sched_domains_mutex);
 
-	hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
-	hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
-	hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
-
 	init_hrtick();
 
 	/* Move init over to a non-isolated CPU */
@@ -7230,7 +7218,32 @@ void __init sched_init_smp(void)
 
 	init_sched_rt_class();
 	init_sched_dl_class();
+	sched_smp_initialized = true;
 }
+
+static int __init migration_init(void)
+{
+	void *cpu = (void *)(long)smp_processor_id();
+	int err;
+
+	/* Initialize migration for the boot CPU */
+	err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
+	BUG_ON(err == NOTIFY_BAD);
+	migration_call(&migration_notifier, CPU_ONLINE, cpu);
+	register_cpu_notifier(&migration_notifier);
+
+	/* Register cpu active notifiers */
+	cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
+	cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
+
+	hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
+	hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
+	hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
+
+	return 0;
+}
+early_initcall(migration_init);
+
 #else
 void __init sched_init_smp(void)
 {
-- 
GitLab