diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 957f16f6c6c4dbb46cee3d3f1d909fd9cb7a0cb6..40259a2988e94fafe7c6f3546a917adbf700557c 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -1438,6 +1438,27 @@ static int klp_mem_prepare(struct klp_patch *patch)
 	return 0;
 }
 
+static int klp_stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
+{
+	int ret;
+
+	/*
+	 * Cpu hotplug locking is a "percpu" rw semaphore; however, its write
+	 * lock and read lock are globally mutually exclusive, that is,
+	 * cpus_write_lock() on one cpu can block cpus_read_lock() on all
+	 * other cpus, and vice versa.
+	 *
+	 * Since cpu hotplug takes cpus_write_lock() before text_mutex, take
+	 * cpus_read_lock() before text_mutex here to avoid deadlock.
+	 */
+	cpus_read_lock();
+	arch_klp_code_modify_prepare();
+	ret = stop_machine_cpuslocked(fn, data, cpus);
+	arch_klp_code_modify_post_process();
+	cpus_read_unlock();
+	return ret;
+}
+
 static int __klp_disable_patch(struct klp_patch *patch)
 {
 	int ret;
@@ -1458,9 +1479,7 @@ static int __klp_disable_patch(struct klp_patch *patch)
 	}
 #endif
 
-	arch_klp_code_modify_prepare();
-	ret = stop_machine(klp_try_disable_patch, &patch_data, cpu_online_mask);
-	arch_klp_code_modify_post_process();
+	ret = klp_stop_machine(klp_try_disable_patch, &patch_data, cpu_online_mask);
 	if (ret)
 		return ret;
 
@@ -1699,9 +1718,7 @@ static int __klp_enable_patch(struct klp_patch *patch)
 	ret = klp_mem_prepare(patch);
 	if (ret)
 		return ret;
-	arch_klp_code_modify_prepare();
-	ret = stop_machine(klp_try_enable_patch, &patch_data, cpu_online_mask);
-	arch_klp_code_modify_post_process();
+	ret = klp_stop_machine(klp_try_enable_patch, &patch_data, cpu_online_mask);
 	if (ret) {
 		klp_mem_recycle(patch);
 		return ret;
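
For illustration only, here is a minimal user-space sketch of the lock ordering described in the comment above. The names hotplug_lock, text_mutex, hotplug_path() and livepatch_path() are hypothetical analogues, not kernel APIs: the rwlock stands in for the cpu hotplug percpu rw semaphore (cpus_write_lock()/cpus_read_lock()) and the mutex for the kernel's text_mutex taken via arch_klp_code_modify_prepare(). The point is only that both paths acquire the locks in the same order, hotplug lock first, then text_mutex, which is the ordering klp_stop_machine() enforces.

	/*
	 * Hypothetical user-space analogue of the lock ordering in the patch.
	 * Both threads take hotplug_lock before text_mutex, so no ABBA
	 * deadlock is possible.
	 */
	#include <pthread.h>
	#include <stdio.h>

	static pthread_rwlock_t hotplug_lock = PTHREAD_RWLOCK_INITIALIZER;
	static pthread_mutex_t text_mutex = PTHREAD_MUTEX_INITIALIZER;

	/* Analogue of the cpu hotplug path: write lock first, then text_mutex. */
	static void *hotplug_path(void *arg)
	{
		(void)arg;
		pthread_rwlock_wrlock(&hotplug_lock);	/* cpus_write_lock() */
		pthread_mutex_lock(&text_mutex);	/* text modification */
		puts("hotplug: touched text under write lock");
		pthread_mutex_unlock(&text_mutex);
		pthread_rwlock_unlock(&hotplug_lock);
		return NULL;
	}

	/* Analogue of klp_stop_machine(): read lock first, then text_mutex. */
	static void *livepatch_path(void *arg)
	{
		(void)arg;
		pthread_rwlock_rdlock(&hotplug_lock);	/* cpus_read_lock() */
		pthread_mutex_lock(&text_mutex);	/* code modify prepare */
		puts("livepatch: touched text under read lock");
		pthread_mutex_unlock(&text_mutex);
		pthread_rwlock_unlock(&hotplug_lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t a, b;

		pthread_create(&a, NULL, hotplug_path, NULL);
		pthread_create(&b, NULL, livepatch_path, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		return 0;
	}

If livepatch_path() instead took text_mutex before the hotplug lock, each thread could hold one lock while waiting for the other, which is the deadlock the patch comment is guarding against by taking cpus_read_lock() before text_mutex.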