diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2012-03-01 16:16:02 +0100 |
---|---|---|
committer | Steven Rostedt <rostedt@rostedt.homelinux.com> | 2013-02-06 09:37:53 -0500 |
commit | b7c1c5705642a1618ec0069aa5a321ca10c353aa (patch) | |
tree | 587c1e7db40b97fd262058df1d9bdc14bc958b61 | |
parent | d4582c407567b7cb44888b3182e5bbbf17e2eec9 (diff) |
net: u64_stat: Protect seqcount
On RT we must prevent that the writer gets preempted inside the write
section. Otherwise a preempting reader might spin forever.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable-rt@vger.kernel.org
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-rw-r--r-- | include/linux/u64_stats_sync.h | 2 |
1 files changed, 2 insertions, 0 deletions
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 8da8c4e87da3..b39549fb3c3f 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -70,6 +70,7 @@ struct u64_stats_sync {
 static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+	preempt_disable_rt();
 	write_seqcount_begin(&syncp->seq);
 #endif
 }
@@ -78,6 +79,7 @@ static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	write_seqcount_end(&syncp->seq);
+	preempt_enable_rt();
 #endif
 }