提交 33d91f00 编写于 作者: E Eric Dumazet 提交者: David S. Miller

net: u64_stats_fetch_begin_bh() and u64_stats_fetch_retry_bh()

- Must disable preemption in case of 32bit UP in u64_stats_fetch_begin()
and u64_stats_fetch_retry()

- Add new u64_stats_fetch_begin_bh() and u64_stats_fetch_retry_bh() for
network usage, disabling BH on 32bit UP only.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
上级 7a9b2d59
@@ -27,6 +27,9 @@
 * (On UP, there is no seqcount_t protection, a reader allowing interrupts could
 * read partial values)
 *
 * 7) For softirq uses, readers can use u64_stats_fetch_begin_bh() and
 *    u64_stats_fetch_retry_bh() helpers
 *
 * Usage :
 *
 * Stats producer (writer) should use following template granted it already got
@@ -58,54 +61,80 @@
 */
#include <linux/seqlock.h>
/*
 * Synchronisation state for 64bit statistics counters.
 * Only 32bit SMP needs the seqcount: 64bit arches read a u64 atomically,
 * and 32bit UP readers rely on preempt/BH disabling instead (see the
 * fetch helpers below), so the struct is empty in those configurations.
 */
struct u64_stats_sync {
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	seqcount_t	seq;
#endif
};
/*
 * Writer side: mark the start of a 64bit stats update.
 * Only 32bit SMP needs the seqcount write section; elsewhere this is a no-op.
 */
static void inline u64_stats_update_begin(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	write_seqcount_begin(&syncp->seq);
#endif
}
/*
 * Writer side: mark the end of a 64bit stats update.
 * Pairs with u64_stats_update_begin(); no-op outside 32bit SMP.
 */
static void inline u64_stats_update_end(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	write_seqcount_end(&syncp->seq);
#endif
}
/*
 * Reader side: snapshot before reading u64 counters.
 * - 32bit SMP: read the seqcount so a torn read can be detected/retried.
 * - 32bit UP: there is no seqcount, so disable preemption to keep the
 *   writer from interleaving with our read (re-enabled in the retry helper).
 * - 64bit: u64 reads are atomic, nothing to do.
 */
static unsigned int inline u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	return read_seqcount_begin(&syncp->seq);
#else
#if BITS_PER_LONG==32
	preempt_disable();
#endif
	return 0;
#endif
}
/*
 * Reader side: finish a read section started by u64_stats_fetch_begin().
 * Returns true if the writer raced with us and the values must be re-read
 * (possible on 32bit SMP only). On 32bit UP this re-enables preemption.
 */
static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					 unsigned int start)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	return read_seqcount_retry(&syncp->seq, start);
#else
#if BITS_PER_LONG==32
	preempt_enable();
#endif
	return false;
#endif
}
/*
 * In case softirq handlers can update u64 counters, readers can use following helpers
 * - SMP 32bit arches use seqcount protection, irq safe.
 * - UP 32bit must disable BH.
 * - 64bit have no problem atomically reading u64 values, irq safe.
 */
static unsigned int inline u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	return read_seqcount_begin(&syncp->seq);
#else
#if BITS_PER_LONG==32
	/* UP 32bit: block softirq writers for the duration of the read */
	local_bh_disable();
#endif
	return 0;
#endif
}
static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp, static bool inline u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
unsigned int start) unsigned int start)
{ {
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
return read_seqcount_retry(&syncp->seq, start);
#else
#if BITS_PER_LONG==32
local_bh_enable();
#endif
return false; return false;
}
#endif #endif
}
#endif /* _LINUX_U64_STATS_SYNC_H */
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册