author	Uwe Kleine-König <u.kleine-koenig@pengutronix.de>	2013-07-09 00:26:32 +0200
committer	Steven Rostedt <rostedt@rostedt.homelinux.com>	2013-12-06 20:56:36 -0500
commit	68f34698d321912e4227e0152e59b2e98459fe06 (patch)
tree	86a252ca0b2f16b612ab5dfaa26d73125f24a7d6
parent	59802b1e0149f8aff01fe1e8355b15b2747c9034 (diff)
list_bl.h: fix it for !SMP && !DEBUG_SPINLOCK
The patch "list_bl.h: make list head locking RT safe" introduced an unconditional __set_bit(0, (unsigned long *)b); in void hlist_bl_lock(struct hlist_bl_head *b). This clobbers the value of b->first. When the value of b->first is retrieved using hlist_bl_first the clobbering is undone using (unsigned long)h->first & ~LIST_BL_LOCKMASK and so depending on LIST_BL_LOCKMASK being one. But LIST_BL_LOCKMASK is only one if at least on of CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are defined. Without these the value returned by hlist_bl_first has the zeroth bit set which likely results in a crash. So only do the clobbering in the cases where LIST_BL_LOCKMASK is one. An alternative would be to always define LIST_BL_LOCKMASK to one with CONFIG_PREEMPT_RT_BASE. Cc: stable-rt@vger.kernel.org Acked-by: Paul Gortmaker <paul.gortmaker@windriver.com> Tested-by: Paul Gortmaker <paul.gortmaker@windriver.com> Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-rw-r--r--	include/linux/list_bl.h	4
1 file changed, 4 insertions, 0 deletions
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
index ddfd46a88f93..becd7a61263f 100644
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ -131,8 +131,10 @@ static inline void hlist_bl_lock(struct hlist_bl_head *b)
 	bit_spin_lock(0, (unsigned long *)b);
 #else
 	raw_spin_lock(&b->lock);
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 	__set_bit(0, (unsigned long *)b);
 #endif
+#endif
 }
 
 static inline void hlist_bl_unlock(struct hlist_bl_head *b)
@@ -140,7 +142,9 @@ static inline void hlist_bl_unlock(struct hlist_bl_head *b)
 #ifndef CONFIG_PREEMPT_RT_BASE
 	__bit_spin_unlock(0, (unsigned long *)b);
 #else
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 	__clear_bit(0, (unsigned long *)b);
+#endif
 	raw_spin_unlock(&b->lock);
 #endif
 }