net: u64_stats_fetch_begin_bh() and u64_stats_fetch_retry_bh()

- Must disable preemption in case of 32bit UP in u64_stats_fetch_begin()
and u64_stats_fetch_retry()

- Add new u64_stats_fetch_begin_bh() and u64_stats_fetch_retry_bh() for
network usage, disabling BH on 32bit UP only.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Eric Dumazet 2010-06-24 00:54:06 +00:00 committed by David S. Miller
parent 7a9b2d5950
commit 33d91f00c7
1 changed file with 75 additions and 46 deletions

View File

@ -27,6 +27,9 @@
* (On UP, there is no seqcount_t protection, a reader allowing interrupts could
* read partial values)
*
* 7) For softirq uses, readers can use u64_stats_fetch_begin_bh() and
* u64_stats_fetch_retry_bh() helpers
*
* Usage :
*
* Stats producer (writer) should use following template granted it already got
@ -58,54 +61,80 @@
*/
#include <linux/seqlock.h>
struct u64_stats_sync {
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
struct u64_stats_sync {
seqcount_t seq;
};
/* Writer side: take the seqcount so 32bit SMP readers retry their snapshot. */
static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
	write_seqcount_begin(&syncp->seq);
}
/* Writer side: release the seqcount taken by u64_stats_update_begin(). */
static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
	write_seqcount_end(&syncp->seq);
}
/* Reader side: snapshot the seqcount before reading the u64 counters. */
static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
	return read_seqcount_begin(&syncp->seq);
}
static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
unsigned int start)
{
return read_seqcount_retry(&syncp->seq, start);
}
#else
struct u64_stats_sync {
};
/* No seqcount protection needed here: writer side is a no-op. */
static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
}
/* No seqcount protection needed here: writer side is a no-op. */
static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
}
/* No seqcount to sample: always return 0 for u64_stats_fetch_retry(). */
static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
	return 0;
}
static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
unsigned int start)
{
return false;
}
#endif
};
/*
 * Writer side: on 32bit SMP take the seqcount so readers retry;
 * elsewhere u64 updates need no extra protection and this is a no-op.
 */
static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	write_seqcount_begin(&syncp->seq);
#endif
}
/*
 * Writer side: release the seqcount taken by u64_stats_update_begin()
 * on 32bit SMP; a no-op everywhere else.
 */
static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	write_seqcount_end(&syncp->seq);
#endif
}
/*
 * Reader side: begin a consistent snapshot of the u64 counters.
 * - 32bit SMP: sample the seqcount, to be checked by u64_stats_fetch_retry().
 * - 32bit UP: disable preemption so the writer cannot interleave.
 * - 64bit: u64 reads are atomic, nothing to do.
 */
static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	return read_seqcount_begin(&syncp->seq);
#else
#if BITS_PER_LONG==32
	preempt_disable();
#endif
	return 0;
#endif
}
static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
unsigned int start)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
return read_seqcount_retry(&syncp->seq, start);
#else
#if BITS_PER_LONG==32
preempt_enable();
#endif
return false;
#endif
}
/*
 * In case softirq handlers can update u64 counters, readers can use the following helpers
 * - SMP 32bit arches use seqcount protection, irq safe.
 * - UP 32bit must disable BH.
 * - 64bit arches have no problem atomically reading u64 values, irq safe.
 */
/*
 * Reader side, softirq-safe variant: begin a snapshot of counters that
 * softirq handlers may update.
 * - 32bit SMP: sample the seqcount (irq safe).
 * - 32bit UP: disable BH so a softirq writer cannot interleave.
 * - 64bit: u64 reads are atomic, nothing to do.
 */
static inline unsigned int u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	return read_seqcount_begin(&syncp->seq);
#else
#if BITS_PER_LONG==32
	local_bh_disable();
#endif
	return 0;
#endif
}
static bool inline u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
unsigned int start)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
return read_seqcount_retry(&syncp->seq, start);
#else
#if BITS_PER_LONG==32
local_bh_enable();
#endif
return false;
#endif
}
#endif /* _LINUX_U64_STATS_SYNC_H */