Commit 0d85923c authored by Thomas Gleixner, committed by Ingo Molnar

smpboot/threads, watchdog/core: Avoid runtime allocation

smpboot_update_cpumask_percpu_thread() allocates a temporary cpumask at
runtime. This is suboptimal: proper error handling at the call site costs
more code size than a statically allocated temporary mask costs in data
size.

Add a static temporary cpumask instead. The function is globally
serialized, so no further protection is required.

Remove the half-baked error handling in the watchdog code and get rid of
the export, as there are no in-tree modular users of that function.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Don Zickus <dzickus@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Ulrich Obergfell <uobergfe@redhat.com>
Link: http://lkml.kernel.org/r/20170912194147.297288838@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent 05ba3de7
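
The trade-off the changelog describes, sketched as standalone illustrative code (not the kernel source itself): with CONFIG_CPUMASK_OFFSTACK=y a cpumask_var_t is heap-allocated, forcing an error path on every caller, while a static struct cpumask costs a fixed NR_CPUS/8 bytes of data and cannot fail.

	#include <linux/cpumask.h>
	#include <linux/slab.h>

	/* Old pattern: runtime allocation, so the caller needs error handling. */
	static int old_pattern(const struct cpumask *a, const struct cpumask *b)
	{
		cpumask_var_t tmp;

		if (!alloc_cpumask_var(&tmp, GFP_KERNEL))	/* extra code size */
			return -ENOMEM;
		cpumask_andnot(tmp, a, b);
		/* ... iterate over tmp ... */
		free_cpumask_var(tmp);
		return 0;
	}

	/* New pattern: fixed data size, cannot fail; safe only because all
	 * callers are serialized against each other. */
	static struct cpumask static_tmp;

	static void new_pattern(const struct cpumask *a, const struct cpumask *b)
	{
		cpumask_andnot(&static_tmp, a, b);
		/* ... iterate over static_tmp ... */
	}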
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -55,7 +55,7 @@ smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
 }
 
 void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
-int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
-					 const struct cpumask *);
+void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
+					  const struct cpumask *);
 
 #endif
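
For reference, a minimal hypothetical client of this API (the demo_* names are illustrative, not from the kernel tree); since this commit also removes the export, such a caller has to be built-in. The void return means there is nothing left to check after an update:

	#include <linux/cpumask.h>
	#include <linux/init.h>
	#include <linux/percpu.h>
	#include <linux/sched.h>
	#include <linux/smpboot.h>

	static DEFINE_PER_CPU(struct task_struct *, demo_task);

	static int demo_should_run(unsigned int cpu)
	{
		return 0;	/* no work pending in this sketch */
	}

	static void demo_fn(unsigned int cpu)
	{
		/* per-CPU work would go here */
	}

	static struct smp_hotplug_thread demo_threads = {
		.store			= &demo_task,
		.thread_should_run	= demo_should_run,
		.thread_fn		= demo_fn,
		.thread_comm		= "demo/%u",
	};

	static int __init demo_init(void)
	{
		int err = smpboot_register_percpu_thread(&demo_threads);

		if (err)
			return err;
		/* After this commit the update cannot fail. */
		smpboot_update_cpumask_percpu_thread(&demo_threads, cpu_online_mask);
		return 0;
	}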
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -344,39 +344,31 @@ EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
  * by the client, but only by calling this function.
  * This function can only be called on a registered smp_hotplug_thread.
  */
-int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
-					 const struct cpumask *new)
+void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
+					  const struct cpumask *new)
 {
 	struct cpumask *old = plug_thread->cpumask;
-	cpumask_var_t tmp;
+	static struct cpumask tmp;
 	unsigned int cpu;
 
-	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
-		return -ENOMEM;
-
 	get_online_cpus();
 	mutex_lock(&smpboot_threads_lock);
 
 	/* Park threads that were exclusively enabled on the old mask. */
-	cpumask_andnot(tmp, old, new);
-	for_each_cpu_and(cpu, tmp, cpu_online_mask)
+	cpumask_andnot(&tmp, old, new);
+	for_each_cpu_and(cpu, &tmp, cpu_online_mask)
 		smpboot_park_thread(plug_thread, cpu);
 
 	/* Unpark threads that are exclusively enabled on the new mask. */
-	cpumask_andnot(tmp, new, old);
-	for_each_cpu_and(cpu, tmp, cpu_online_mask)
+	cpumask_andnot(&tmp, new, old);
+	for_each_cpu_and(cpu, &tmp, cpu_online_mask)
 		smpboot_unpark_thread(plug_thread, cpu);
 
 	cpumask_copy(old, new);
 
 	mutex_unlock(&smpboot_threads_lock);
 	put_online_cpus();
-
-	free_cpumask_var(tmp);
-	return 0;
 }
-EXPORT_SYMBOL_GPL(smpboot_update_cpumask_percpu_thread);
 
 static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);
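
Note that the static tmp above is only ever touched with smpboot_threads_lock held, which is the "globally serialized" claim from the changelog. A minimal sketch of that pattern, with hypothetical demo_* names:

	#include <linux/cpumask.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(demo_lock);
	static struct cpumask demo_scratch;	/* only touched under demo_lock */

	static void demo_update(const struct cpumask *old, const struct cpumask *new)
	{
		mutex_lock(&demo_lock);
		cpumask_andnot(&demo_scratch, old, new);
		/* ... park/unpark threads based on demo_scratch ... */
		mutex_unlock(&demo_lock);
	}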
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -787,31 +787,20 @@ int proc_watchdog_thresh(struct ctl_table *table, int write,
 	return err;
 }
 
-static int watchdog_update_cpus(void)
+static void watchdog_update_cpus(void)
 {
-	if (IS_ENABLED(CONFIG_SOFTLOCKUP_DETECTOR)) {
-		return smpboot_update_cpumask_percpu_thread(&watchdog_threads,
-							    &watchdog_cpumask);
+	if (IS_ENABLED(CONFIG_SOFTLOCKUP_DETECTOR) && watchdog_running) {
+		smpboot_update_cpumask_percpu_thread(&watchdog_threads,
						     &watchdog_cpumask);
+		__lockup_detector_cleanup();
 	}
-	return 0;
 }
 
 static void proc_watchdog_cpumask_update(void)
 {
 	/* Remove impossible cpus to keep sysctl output clean. */
 	cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
 
-	if (watchdog_running) {
-		/*
-		 * Failure would be due to being unable to allocate a
-		 * temporary cpumask, so we are likely not in a position to
-		 * do much else to make things better.
-		 */
-		if (watchdog_update_cpus() != 0)
-			pr_err("cpumask update failed\n");
-	}
-
+	watchdog_update_cpus();
 	watchdog_nmi_reconfigure();
 }
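
The IS_ENABLED() test in watchdog_update_cpus() is a compile-time constant, so the whole branch (including the call) drops out when CONFIG_SOFTLOCKUP_DETECTOR is disabled, while the code still gets type-checked. A minimal standalone sketch of that pattern (demo_* names are illustrative):

	#include <linux/kconfig.h>

	static bool demo_running;

	static void demo_do_update(void)
	{
		/* the actual update work would go here */
	}

	static void demo_update_cpus(void)
	{
		/*
		 * IS_ENABLED() expands to 0 or 1 at compile time, so the
		 * compiler eliminates the branch entirely when the option
		 * is disabled, yet the call is still parsed and checked.
		 */
		if (IS_ENABLED(CONFIG_SOFTLOCKUP_DETECTOR) && demo_running)
			demo_do_update();
	}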