提交 e07d283d 编写于 作者: A Arnd Bergmann 提交者: Yang Yingliang

smp: Fix smp_call_function_single_async prototype

stable inclusion
from linux-4.19.191
commit db0517ac659e0ac61b916cffc29564cf3ab58b0d

--------------------------------

commit 1139aeb1 upstream.

As of commit 966a9671 ("smp: Avoid using two cache lines for struct
call_single_data"), the smp code prefers 32-byte aligned call_single_data
objects for performance reasons, but the block layer includes an instance
of this structure in the main 'struct request' that is more sensitive
to size than to performance here, see 4ccafe03 ("block: unalign
call_single_data in struct request").

The result is a violation of the calling conventions that clang correctly
points out:

block/blk-mq.c:630:39: warning: passing 8-byte aligned argument to 32-byte aligned parameter 2 of 'smp_call_function_single_async' may result in an unaligned pointer access [-Walign-mismatch]
                smp_call_function_single_async(cpu, &rq->csd);

It does seem that the usage of the call_single_data without cache line
alignment should still be allowed by the smp code, so just change the
function prototype so it accepts both, but leave the default alignment
unchanged for the other users. This seems better to me than adding
a local hack to shut up an otherwise correct warning in the caller.
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Jens Axboe <axboe@kernel.dk>
Link: https://lkml.kernel.org/r/20210505211300.3174456-1-arnd@kernel.org
[nc: Fix conflicts]
Signed-off-by: Nathan Chancellor <nathan@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
上级 4bf849b8
@@ -53,7 +53,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
 			smp_call_func_t func, void *info, bool wait,
 			gfp_t gfp_flags);
-int smp_call_function_single_async(int cpu, call_single_data_t *csd);
+int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
 #ifdef CONFIG_SMP
......
@@ -103,12 +103,12 @@ void __init call_function_init(void)
  * previous function call. For multi-cpu calls its even more interesting
  * as we'll have to ensure no other cpu is observing our csd.
  */
-static __always_inline void csd_lock_wait(call_single_data_t *csd)
+static __always_inline void csd_lock_wait(struct __call_single_data *csd)
 {
 	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
 }
-static __always_inline void csd_lock(call_single_data_t *csd)
+static __always_inline void csd_lock(struct __call_single_data *csd)
 {
 	csd_lock_wait(csd);
 	csd->flags |= CSD_FLAG_LOCK;
@@ -121,7 +121,7 @@ static __always_inline void csd_lock(call_single_data_t *csd)
 	smp_wmb();
 }
-static __always_inline void csd_unlock(call_single_data_t *csd)
+static __always_inline void csd_unlock(struct __call_single_data *csd)
 {
 	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
@@ -138,7 +138,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
  * for execution on the given CPU. data must already have
  * ->func, ->info, and ->flags set.
  */
-static int generic_exec_single(int cpu, call_single_data_t *csd,
+static int generic_exec_single(int cpu, struct __call_single_data *csd,
 			       smp_call_func_t func, void *info)
 {
 	if (cpu == smp_processor_id()) {
@@ -323,7 +323,7 @@ EXPORT_SYMBOL(smp_call_function_single);
  * NOTE: Be careful, there is unfortunately no current debugging facility to
  * validate the correctness of this serialization.
  */
-int smp_call_function_single_async(int cpu, call_single_data_t *csd)
+int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
 {
 	int err = 0;
......
@@ -23,7 +23,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 }
 EXPORT_SYMBOL(smp_call_function_single);
-int smp_call_function_single_async(int cpu, call_single_data_t *csd)
+int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
 {
 	unsigned long flags;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册