Commit 765dcd20 authored by Marco Elver, committed by Thomas Gleixner

asm-generic/atomic: Use __always_inline for fallback wrappers

Use __always_inline for atomic fallback wrappers. When building for size
(CC_OPTIMIZE_FOR_SIZE), some compilers appear to be less inclined to
inline even relatively small static inline functions that are assumed to
be inlinable, such as atomic ops. This can cause problems, for example in
UACCESS regions.
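
For context: a plain static inline is only a hint that the optimizer is free
to ignore, while __always_inline overrides the compiler's cost model. Below is
a minimal sketch of the difference; the macro spelling matches what the
kernel's <linux/compiler*.h> headers provide, and the function names are made
up for illustration only.

  /* Forces inlining regardless of -Os/-O2 heuristics. */
  #define __always_inline inline __attribute__((__always_inline__))

  /*
   * Hint only: under CC_OPTIMIZE_FOR_SIZE (-Os) the compiler may emit this
   * out of line, turning an "inline" op into a real call, e.g. inside a
   * UACCESS (STAC/CLAC) region where calls must be whitelisted.
   */
  static inline int add_return_hint(int i, int *v)
  {
  	return *v += i;
  }

  /* Always expanded at the call site, so no unexpected call appears. */
  static __always_inline int add_return_forced(int i, int *v)
  {
  	return *v += i;
  }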

While the fallback wrappers aren't pure wrappers, they are trivial
nonetheless, and the function they wrap should determine the final
inlining policy.
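
As an illustration, here is roughly what the inc template shown below expands
to for atomic_t in the generated include/linux/atomic-fallback.h after this
change (a sketch of the generator's output; the #ifndef guard is how the
generated header skips ops the architecture already provides). The body is a
single call, so the final inlining decision really rests with the atomic_add()
it wraps:

  #ifndef atomic_inc
  static __always_inline void
  atomic_inc(atomic_t *v)
  {
  	atomic_add(1, v);
  }
  #define atomic_inc atomic_inc
  #endif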

For x86 tinyconfig we observe:
- vmlinux baseline: 1315988
- vmlinux with patch: 1315928 (-60 bytes)

[ tglx: Cherry-picked from KCSAN ]
Suggested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Marco Elver <elver@google.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Parent: b29482fd
This diff is collapsed.
scripts/atomic/fallbacks/acquire
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_${pfx}${name}${sfx}_acquire(${params})
 {
 	${ret} ret = ${atomic}_${pfx}${name}${sfx}_relaxed(${args});
......
scripts/atomic/fallbacks/add_negative
@@ -8,7 +8,7 @@ cat <<EOF
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-static inline bool
+static __always_inline bool
 ${atomic}_add_negative(${int} i, ${atomic}_t *v)
 {
 	return ${atomic}_add_return(i, v) < 0;
......
scripts/atomic/fallbacks/add_unless
@@ -8,7 +8,7 @@ cat << EOF
  * Atomically adds @a to @v, if @v was not already @u.
  * Returns true if the addition was done.
  */
-static inline bool
+static __always_inline bool
 ${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
 {
 	return ${atomic}_fetch_add_unless(v, a, u) != u;
......
scripts/atomic/fallbacks/andnot
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
 {
 	${retstmt}${atomic}_${pfx}and${sfx}${order}(~i, v);
......
scripts/atomic/fallbacks/dec
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
 {
 	${retstmt}${atomic}_${pfx}sub${sfx}${order}(1, v);
......
scripts/atomic/fallbacks/dec_and_test
@@ -7,7 +7,7 @@ cat <<EOF
  * returns true if the result is 0, or false for all other
  * cases.
  */
-static inline bool
+static __always_inline bool
 ${atomic}_dec_and_test(${atomic}_t *v)
 {
 	return ${atomic}_dec_return(v) == 0;
......
scripts/atomic/fallbacks/dec_if_positive
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_dec_if_positive(${atomic}_t *v)
 {
 	${int} dec, c = ${atomic}_read(v);
......
scripts/atomic/fallbacks/dec_unless_positive
 cat <<EOF
-static inline bool
+static __always_inline bool
 ${atomic}_dec_unless_positive(${atomic}_t *v)
 {
 	${int} c = ${atomic}_read(v);
......
scripts/atomic/fallbacks/fence
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_${pfx}${name}${sfx}(${params})
 {
 	${ret} ret;
......
scripts/atomic/fallbacks/fetch_add_unless
@@ -8,7 +8,7 @@ cat << EOF
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns original value of @v
  */
-static inline ${int}
+static __always_inline ${int}
 ${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
 {
 	${int} c = ${atomic}_read(v);
......
scripts/atomic/fallbacks/inc
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
 {
 	${retstmt}${atomic}_${pfx}add${sfx}${order}(1, v);
......
scripts/atomic/fallbacks/inc_and_test
@@ -7,7 +7,7 @@ cat <<EOF
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-static inline bool
+static __always_inline bool
 ${atomic}_inc_and_test(${atomic}_t *v)
 {
 	return ${atomic}_inc_return(v) == 0;
......
scripts/atomic/fallbacks/inc_not_zero
@@ -6,7 +6,7 @@ cat <<EOF
  * Atomically increments @v by 1, if @v is non-zero.
  * Returns true if the increment was done.
  */
-static inline bool
+static __always_inline bool
 ${atomic}_inc_not_zero(${atomic}_t *v)
 {
 	return ${atomic}_add_unless(v, 1, 0);
......
scripts/atomic/fallbacks/inc_unless_negative
 cat <<EOF
-static inline bool
+static __always_inline bool
 ${atomic}_inc_unless_negative(${atomic}_t *v)
 {
 	${int} c = ${atomic}_read(v);
......
scripts/atomic/fallbacks/read_acquire
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_read_acquire(const ${atomic}_t *v)
 {
 	return smp_load_acquire(&(v)->counter);
......
scripts/atomic/fallbacks/release
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_${pfx}${name}${sfx}_release(${params})
 {
 	__atomic_release_fence();
......
scripts/atomic/fallbacks/set_release
 cat <<EOF
-static inline void
+static __always_inline void
 ${atomic}_set_release(${atomic}_t *v, ${int} i)
 {
 	smp_store_release(&(v)->counter, i);
......
scripts/atomic/fallbacks/sub_and_test
@@ -8,7 +8,7 @@ cat <<EOF
  * true if the result is zero, or false for all
  * other cases.
  */
-static inline bool
+static __always_inline bool
 ${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
 {
 	return ${atomic}_sub_return(i, v) == 0;
......
scripts/atomic/fallbacks/try_cmpxchg
 cat <<EOF
-static inline bool
+static __always_inline bool
 ${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
 {
 	${int} r, o = *old;
......
scripts/atomic/gen-atomic-fallback.sh
@@ -149,6 +149,8 @@ cat << EOF
 #ifndef _LINUX_ATOMIC_FALLBACK_H
 #define _LINUX_ATOMIC_FALLBACK_H
 
+#include <linux/compiler.h>
+
 EOF
 
 for xchg in "xchg" "cmpxchg" "cmpxchg64"; do
......
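
The final hunk adds #include <linux/compiler.h> to the preamble that
gen-atomic-fallback.sh writes into the generated header: since every generated
fallback now uses __always_inline, the header has to pull in that definition
itself rather than rely on its includers. A sketch of the resulting top of the
generated file (illustrative only; the real file is produced by the script and
must not be edited by hand):

  /* Generated header -- DO NOT MODIFY THIS FILE DIRECTLY. */
  #ifndef _LINUX_ATOMIC_FALLBACK_H
  #define _LINUX_ATOMIC_FALLBACK_H

  #include <linux/compiler.h>	/* pulls in __always_inline */

  /* ... generated xchg/cmpxchg wrappers and atomic fallbacks follow ... */

  #endif /* _LINUX_ATOMIC_FALLBACK_H */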