diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index fa261a0da280a5e87dd8aece56e66600911fbc1c..8da8c4e87da3018323177000d46a5e42b19bf23f 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -67,21 +67,21 @@ struct u64_stats_sync {
 #endif
 };
 
-static void inline u64_stats_update_begin(struct u64_stats_sync *syncp)
+static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	write_seqcount_begin(&syncp->seq);
 #endif
 }
 
-static void inline u64_stats_update_end(struct u64_stats_sync *syncp)
+static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	write_seqcount_end(&syncp->seq);
 #endif
 }
 
-static unsigned int inline u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	return read_seqcount_begin(&syncp->seq);
@@ -93,7 +93,7 @@ static unsigned int inline u64_stats_fetch_begin(const struct u64_stats_sy
 #endif
 }
 
-static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
 					 unsigned int start)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
@@ -112,7 +112,7 @@ static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
  * - UP 32bit must disable BH.
  * - 64bit have no problem atomically reading u64 values, irq safe.
  */
-static unsigned int inline u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
+static inline unsigned int u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	return read_seqcount_begin(&syncp->seq);
@@ -124,7 +124,7 @@ static unsigned int inline u64_stats_fetch_begin_bh(const struct u64_stats_sync
 #endif
 }
 
-static bool inline u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
+static inline bool u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
 					 unsigned int start)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 517d827f498281da50210aa76a02b4693116154a..06682e7b12e24190c592762e24f017f1baef76e9 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -4454,7 +4454,7 @@ int perf_swevent_get_recursion_context(void)
 }
 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
 
-void inline perf_swevent_put_recursion_context(int rctx)
+inline void perf_swevent_put_recursion_context(int rctx)
 {
 	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
 
diff --git a/kernel/time.c b/kernel/time.c
index ba9b338d1835e071273d8f8f8d2a054ab9c95790..32174359576fa075a520de56ab30aacc51af08ef 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -238,7 +238,7 @@ EXPORT_SYMBOL(current_fs_time);
  * Avoid unnecessary multiplications/divisions in the
  * two most common HZ cases:
  */
-unsigned int inline jiffies_to_msecs(const unsigned long j)
+inline unsigned int jiffies_to_msecs(const unsigned long j)
 {
 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
 	return (MSEC_PER_SEC / HZ) * j;
@@ -254,7 +254,7 @@ unsigned int inline jiffies_to_msecs(const unsigned long j)
 }
 EXPORT_SYMBOL(jiffies_to_msecs);
 
-unsigned int inline jiffies_to_usecs(const unsigned long j)
+inline unsigned int jiffies_to_usecs(const unsigned long j)
 {
 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
 	return (USEC_PER_SEC / HZ) * j;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 07a654486f75cfdfe13035f7bf501e2d2dbf0a4a..f823ee192e940c9155611e6ec88070cc310f99a5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4013,7 +4013,7 @@ static void __init setup_usemap(struct pglist_data *pgdat,
 		zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
 }
 #else
-static void inline setup_usemap(struct pglist_data *pgdat,
+static inline void setup_usemap(struct pglist_data *pgdat,
 				struct zone *zone, unsigned long zonesize) {}
 #endif /* CONFIG_SPARSEMEM */
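
Note, not part of the patch: both orderings are accepted by the compiler, since "inline" is a function specifier that may appear anywhere among the declaration specifiers, but gcc warns under -Wextra (via -Wold-style-declaration) when "inline" does not come first; reorderings like the hunks above silence that warning. A minimal standalone sketch follows (hypothetical file demo.c, assumed build: gcc -Wextra -c demo.c):

/* demo.c (hypothetical): illustrates the warning the patch addresses. */

/* Old ordering: legal C, but gcc with -Wextra emits
 * "'inline' is not at beginning of declaration"
 * under -Wold-style-declaration. */
static void inline old_style(void) { }

/* Reordered form used throughout the patch: compiles without warning. */
static inline void new_style(void) { }

int main(void)
{
	/* Calling both functions also keeps -Wunused-function quiet. */
	old_style();
	new_style();
	return 0;
}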