about summary refs log tree commit diff stats
path: root/include/linux
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/u64_stats_sync.h  59
1 file changed, 44 insertions(+), 15 deletions(-)
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index b38e3a58de83..fa261a0da280 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -27,6 +27,9 @@
* (On UP, there is no seqcount_t protection, a reader allowing interrupts could
* read partial values)
*
+ * 7) For softirq uses, readers can use u64_stats_fetch_begin_bh() and
+ * u64_stats_fetch_retry_bh() helpers
+ *
* Usage :
*
* Stats producer (writer) should use following template granted it already got
@@ -58,54 +61,80 @@
*/
#include <linux/seqlock.h>
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
struct u64_stats_sync {
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
seqcount_t seq;
+#endif
};
static void inline u64_stats_update_begin(struct u64_stats_sync *syncp)
{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
write_seqcount_begin(&syncp->seq);
+#endif
}
static void inline u64_stats_update_end(struct u64_stats_sync *syncp)
{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
write_seqcount_end(&syncp->seq);
+#endif
}
static unsigned int inline u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
return read_seqcount_begin(&syncp->seq);
+#else
+#if BITS_PER_LONG==32
+ preempt_disable();
+#endif
+ return 0;
+#endif
}
static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
unsigned int start)
{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
return read_seqcount_retry(&syncp->seq, start);
-}
-
#else
-struct u64_stats_sync {
-};
-
-static void inline u64_stats_update_begin(struct u64_stats_sync *syncp)
-{
-}
-
-static void inline u64_stats_update_end(struct u64_stats_sync *syncp)
-{
+#if BITS_PER_LONG==32
+ preempt_enable();
+#endif
+ return false;
+#endif
}
-static unsigned int inline u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+/*
+ * In case softirq handlers can update u64 counters, readers can use following helpers
+ * - SMP 32bit arches use seqcount protection, irq safe.
+ * - UP 32bit must disable BH.
+ * - 64bit have no problem atomically reading u64 values, irq safe.
+ */
+static unsigned int inline u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ return read_seqcount_begin(&syncp->seq);
+#else
+#if BITS_PER_LONG==32
+ local_bh_disable();
+#endif
return 0;
+#endif
}
-static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+static bool inline u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
unsigned int start)
{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ return read_seqcount_retry(&syncp->seq, start);
+#else
+#if BITS_PER_LONG==32
+ local_bh_enable();
+#endif
return false;
-}
#endif
+}
#endif /* _LINUX_U64_STATS_SYNC_H */