提交 fa9f90be 编写于 作者: Jesper Juhl 提交者: Jiri Kosina

Kill off a bunch of warning: ‘inline’ is not at beginning of declaration

These warnings are spewed during a build of a 'allnoconfig' kernel
(especially the ones from u64_stats_sync.h show up a lot) when building
with -Wextra (which I often do).
They are
  a) annoying
  b) easy to get rid of.
This patch kills them off.

include/linux/u64_stats_sync.h:70:1: warning: ‘inline’ is not at beginning of declaration
include/linux/u64_stats_sync.h:77:1: warning: ‘inline’ is not at beginning of declaration
include/linux/u64_stats_sync.h:84:1: warning: ‘inline’ is not at beginning of declaration
include/linux/u64_stats_sync.h:96:1: warning: ‘inline’ is not at beginning of declaration
include/linux/u64_stats_sync.h:115:1: warning: ‘inline’ is not at beginning of declaration
include/linux/u64_stats_sync.h:127:1: warning: ‘inline’ is not at beginning of declaration
kernel/time.c:241:1: warning: ‘inline’ is not at beginning of declaration
kernel/time.c:257:1: warning: ‘inline’ is not at beginning of declaration
kernel/perf_event.c:4513:1: warning: ‘inline’ is not at beginning of declaration
mm/page_alloc.c:4012:1: warning: ‘inline’ is not at beginning of declaration
Signed-off-by: Jesper Juhl <jj@chaosbits.net>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
上级 73d8c34f
...@@ -67,21 +67,21 @@ struct u64_stats_sync { ...@@ -67,21 +67,21 @@ struct u64_stats_sync {
#endif #endif
}; };
/*
 * Begin a writer-side update of a u64 statistics counter set.
 *
 * On 32bit SMP a 64bit counter cannot be read atomically, so writers
 * bump a seqcount around the update and readers retry on a torn read;
 * everywhere else this compiles to nothing.
 *
 * 'inline' is placed between the storage class and the return type —
 * `static void inline` triggers -Wextra's "'inline' is not at
 * beginning of declaration" warning.
 */
static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	write_seqcount_begin(&syncp->seq);
#endif
}
/*
 * Finish a writer-side update started with u64_stats_update_begin().
 *
 * Closes the seqcount write section on 32bit SMP; a no-op on all
 * other configurations.
 *
 * 'inline' is placed between the storage class and the return type —
 * `static void inline` triggers -Wextra's "'inline' is not at
 * beginning of declaration" warning.
 */
static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	write_seqcount_end(&syncp->seq);
#endif
}
static unsigned int inline u64_stats_fetch_begin(const struct u64_stats_sync *syncp) static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{ {
#if BITS_PER_LONG==32 && defined(CONFIG_SMP) #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
return read_seqcount_begin(&syncp->seq); return read_seqcount_begin(&syncp->seq);
...@@ -93,7 +93,7 @@ static unsigned int inline u64_stats_fetch_begin(const struct u64_stats_sync *sy ...@@ -93,7 +93,7 @@ static unsigned int inline u64_stats_fetch_begin(const struct u64_stats_sync *sy
#endif #endif
} }
static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp, static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
unsigned int start) unsigned int start)
{ {
#if BITS_PER_LONG==32 && defined(CONFIG_SMP) #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
...@@ -112,7 +112,7 @@ static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp, ...@@ -112,7 +112,7 @@ static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
* - UP 32bit must disable BH. * - UP 32bit must disable BH.
* - 64bit have no problem atomically reading u64 values, irq safe. * - 64bit have no problem atomically reading u64 values, irq safe.
*/ */
static unsigned int inline u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp) static inline unsigned int u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
{ {
#if BITS_PER_LONG==32 && defined(CONFIG_SMP) #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
return read_seqcount_begin(&syncp->seq); return read_seqcount_begin(&syncp->seq);
...@@ -124,7 +124,7 @@ static unsigned int inline u64_stats_fetch_begin_bh(const struct u64_stats_sync ...@@ -124,7 +124,7 @@ static unsigned int inline u64_stats_fetch_begin_bh(const struct u64_stats_sync
#endif #endif
} }
static bool inline u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp, static inline bool u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
unsigned int start) unsigned int start)
{ {
#if BITS_PER_LONG==32 && defined(CONFIG_SMP) #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
......
...@@ -4454,7 +4454,7 @@ int perf_swevent_get_recursion_context(void) ...@@ -4454,7 +4454,7 @@ int perf_swevent_get_recursion_context(void)
} }
EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
void inline perf_swevent_put_recursion_context(int rctx) inline void perf_swevent_put_recursion_context(int rctx)
{ {
struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
......
...@@ -238,7 +238,7 @@ EXPORT_SYMBOL(current_fs_time); ...@@ -238,7 +238,7 @@ EXPORT_SYMBOL(current_fs_time);
* Avoid unnecessary multiplications/divisions in the * Avoid unnecessary multiplications/divisions in the
* two most common HZ cases: * two most common HZ cases:
*/ */
unsigned int inline jiffies_to_msecs(const unsigned long j) inline unsigned int jiffies_to_msecs(const unsigned long j)
{ {
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
return (MSEC_PER_SEC / HZ) * j; return (MSEC_PER_SEC / HZ) * j;
...@@ -254,7 +254,7 @@ unsigned int inline jiffies_to_msecs(const unsigned long j) ...@@ -254,7 +254,7 @@ unsigned int inline jiffies_to_msecs(const unsigned long j)
} }
EXPORT_SYMBOL(jiffies_to_msecs); EXPORT_SYMBOL(jiffies_to_msecs);
unsigned int inline jiffies_to_usecs(const unsigned long j) inline unsigned int jiffies_to_usecs(const unsigned long j)
{ {
#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ) #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
return (USEC_PER_SEC / HZ) * j; return (USEC_PER_SEC / HZ) * j;
......
...@@ -4013,7 +4013,7 @@ static void __init setup_usemap(struct pglist_data *pgdat, ...@@ -4013,7 +4013,7 @@ static void __init setup_usemap(struct pglist_data *pgdat,
zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize); zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
} }
#else #else
/*
 * !CONFIG_SPARSEMEM stub: pageblock usemaps are only allocated when
 * SPARSEMEM is enabled, so this is deliberately a no-op.
 *
 * 'inline' is placed between the storage class and the return type —
 * `static void inline` triggers -Wextra's "'inline' is not at
 * beginning of declaration" warning.
 */
static inline void setup_usemap(struct pglist_data *pgdat,
				struct zone *zone, unsigned long zonesize) {}
#endif /* CONFIG_SPARSEMEM */ #endif /* CONFIG_SPARSEMEM */
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册