Commit 5b8d27dd authored by Yang Yingliang

Revert "smp: Fix smp_call_function_single_async prototype"

hulk inclusion
category: bugfix
bugzilla: NA
CVE: NA

--------------------------------

This reverts commit e07d283d
to avoid breaking kABI.
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Reviewed-by: Jian Cheng <cj.chengjian@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 67f82cad
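
For context: call_single_data_t is an alignment-annotated typedef of struct __call_single_data, so the two spellings name the same type at the C level. A minimal sketch of that relationship, based on the 4.19-era definitions in include/linux/smp.h (shown for illustration, not part of this diff):

	/* The struct that a csd actually is (4.19-era layout). */
	struct __call_single_data {
		struct llist_node llist;
		smp_call_func_t func;
		void *info;
		unsigned int flags;
	};

	/* The typedef only adds an alignment attribute so that one csd
	 * does not straddle two cache lines; it names the same struct. */
	typedef struct __call_single_data call_single_data_t
		__aligned(sizeof(struct __call_single_data));

Although the calling convention is unchanged, kABI version checks are sensitive to the exact type spelling in an exported prototype, so keeping the old call_single_data_t spelling preserves the symbol CRC that existing out-of-tree modules were built against.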
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -53,7 +53,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
 		smp_call_func_t func, void *info, bool wait,
 		gfp_t gfp_flags);
 
-int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
+int smp_call_function_single_async(int cpu, call_single_data_t *csd);
 
 #ifdef CONFIG_SMP
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -103,12 +103,12 @@ void __init call_function_init(void)
  * previous function call. For multi-cpu calls its even more interesting
  * as we'll have to ensure no other cpu is observing our csd.
  */
-static __always_inline void csd_lock_wait(struct __call_single_data *csd)
+static __always_inline void csd_lock_wait(call_single_data_t *csd)
 {
 	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
 }
 
-static __always_inline void csd_lock(struct __call_single_data *csd)
+static __always_inline void csd_lock(call_single_data_t *csd)
 {
 	csd_lock_wait(csd);
 	csd->flags |= CSD_FLAG_LOCK;
@@ -121,7 +121,7 @@ static __always_inline void csd_lock(struct __call_single_data *csd)
 	smp_wmb();
 }
 
-static __always_inline void csd_unlock(struct __call_single_data *csd)
+static __always_inline void csd_unlock(call_single_data_t *csd)
 {
 	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
@@ -138,7 +138,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
  * for execution on the given CPU. data must already have
  * ->func, ->info, and ->flags set.
  */
-static int generic_exec_single(int cpu, struct __call_single_data *csd,
+static int generic_exec_single(int cpu, call_single_data_t *csd,
 			       smp_call_func_t func, void *info)
 {
 	if (cpu == smp_processor_id()) {
@@ -323,7 +323,7 @@ EXPORT_SYMBOL(smp_call_function_single);
  * NOTE: Be careful, there is unfortunately no current debugging facility to
  * validate the correctness of this serialization.
  */
-int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
+int smp_call_function_single_async(int cpu, call_single_data_t *csd)
 {
 	int err = 0;
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -23,7 +23,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 }
 EXPORT_SYMBOL(smp_call_function_single);
 
-int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
+int smp_call_function_single_async(int cpu, call_single_data_t *csd)
 {
 	unsigned long flags;
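
For reference, a minimal, hypothetical caller of the restored prototype (the names example_csd, example_func, and example_kick_cpu1 are illustrative, not from this commit). In the 4.19 code above, the caller fills in ->func and ->info, while smp_call_function_single_async() manages ->flags itself, and the csd must not be resubmitted until the previous invocation has completed:

	#include <linux/printk.h>
	#include <linux/smp.h>

	/* Static storage zero-initializes ->flags, as required before
	 * the first submission. */
	static call_single_data_t example_csd;

	static void example_func(void *info)
	{
		pr_info("ran on CPU %d\n", smp_processor_id());
	}

	/* Fire-and-forget: run example_func on CPU 1 (assumes CPU 1
	 * exists and is online). */
	static void example_kick_cpu1(void)
	{
		example_csd.func = example_func;
		example_csd.info = NULL;
		smp_call_function_single_async(1, &example_csd);
	}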