author	Paul E. McKenney <paul.mckenney@linaro.org>	2012-01-23 17:23:35 -0800
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-02-21 09:06:05 -0800
commit	c0cfbbb0d4fca14b828a7635a59784adfba8989d (patch)
tree	a6201565b275d63b17df2a256e9b30c91f94cb6d /kernel/rcutree_plugin.h
parent	3d3b7db0a22085cfc05c3318b9874f7fb8266d18 (diff)
rcu: No interrupt disabling for rcu_prepare_for_idle()
The rcu_prepare_for_idle() function is always called with interrupts disabled, so there is no reason to disable interrupts again within rcu_prepare_for_idle(). Therefore, this commit removes all of the interrupt disabling, also removing a latent disabling-unbalance bug.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
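An aside on the reasoning above: when every caller of a function already runs with interrupts disabled, a nested local_irq_save()/local_irq_restore() pair inside it is pure overhead, and every extra restore point is a chance to unbalance the caller's interrupt state. The following is a minimal userspace model, not kernel code: the bool "interrupt flag", the pointer-taking local_irq_save(), and the prepare_for_idle() name are all illustrative simplifications (the kernel's local_irq_save() is a macro that writes into flags directly).

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool irqs_enabled = true;           /* models the CPU's interrupt flag */

static void local_irq_save(bool *flags)    /* capture current state, then disable */
{
	*flags = irqs_enabled;
	irqs_enabled = false;
}

static void local_irq_restore(bool flags)  /* restore the captured state */
{
	irqs_enabled = flags;
}

/* After this commit: the callee relies on its caller's guarantee. */
static void prepare_for_idle(void)
{
	assert(!irqs_enabled);  /* kernel analogue could be WARN_ON_ONCE(!irqs_disabled()) */
	/* ... all work proceeds with interrupts already off ... */
}

int main(void)
{
	bool flags;

	local_irq_save(&flags);    /* the caller (the idle path) disables once */
	prepare_for_idle();        /* no nested save/restore inside */
	local_irq_restore(flags);  /* single balanced restore; interrupts back on */

	printf("irqs_enabled = %d\n", irqs_enabled);  /* prints 1 */
	return 0;
}

In the model, as in the patched kernel function, there is exactly one save and one restore, both owned by the caller, so no early return inside the callee can leave the interrupt state wrong.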
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--	kernel/rcutree_plugin.h	18
1 file changed, 1 insertion(+), 17 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 98ce17cf1fb..5e25ee327cc 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -2096,10 +2096,6 @@ static void rcu_cleanup_after_idle(int cpu)
  */
 static void rcu_prepare_for_idle(int cpu)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
-
 	/*
 	 * If there are no callbacks on this CPU, enter dyntick-idle mode.
 	 * Also reset state to avoid prejudicing later attempts.
@@ -2107,7 +2103,6 @@ static void rcu_prepare_for_idle(int cpu)
 	if (!rcu_cpu_has_callbacks(cpu)) {
 		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
 		per_cpu(rcu_dyntick_drain, cpu) = 0;
-		local_irq_restore(flags);
 		trace_rcu_prep_idle("No callbacks");
 		return;
 	}
@@ -2117,7 +2112,6 @@ static void rcu_prepare_for_idle(int cpu)
 	 * refrained from disabling the scheduling-clock tick.
 	 */
 	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
-		local_irq_restore(flags);
 		trace_rcu_prep_idle("In holdoff");
 		return;
 	}
@@ -2142,7 +2136,6 @@ static void rcu_prepare_for_idle(int cpu)
 	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
 		/* We have hit the limit, so time to give up. */
 		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
-		local_irq_restore(flags);
 		trace_rcu_prep_idle("Begin holdoff");
 		invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */
 		return;
@@ -2154,23 +2147,17 @@ static void rcu_prepare_for_idle(int cpu)
 	 */
 #ifdef CONFIG_TREE_PREEMPT_RCU
 	if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
-		local_irq_restore(flags);
 		rcu_preempt_qs(cpu);
 		force_quiescent_state(&rcu_preempt_state, 0);
-		local_irq_save(flags);
 	}
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
-		local_irq_restore(flags);
 		rcu_sched_qs(cpu);
 		force_quiescent_state(&rcu_sched_state, 0);
-		local_irq_save(flags);
 	}
 	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
-		local_irq_restore(flags);
 		rcu_bh_qs(cpu);
 		force_quiescent_state(&rcu_bh_state, 0);
-		local_irq_save(flags);
 	}
 
 	/*
@@ -2178,13 +2165,10 @@ static void rcu_prepare_for_idle(int cpu)
 	 * So try forcing the callbacks through the grace period.
 	 */
 	if (rcu_cpu_has_callbacks(cpu)) {
-		local_irq_restore(flags);
 		trace_rcu_prep_idle("More callbacks");
 		invoke_rcu_core();
-	} else {
-		local_irq_restore(flags);
+	} else
 		trace_rcu_prep_idle("Callbacks drained");
-	}
 }
 
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
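Taking the hunks together, the post-patch function keeps no flags variable and every early exit is a bare return. Below is a condensed, compilable sketch of that control flow, under heavy simplification: the helpers are stand-in stubs rather than the kernel's implementations, the per-CPU holdoff and drain counters are reduced to booleans, and the name rcu_prepare_for_idle_model() is this sketch's own.

#include <stdbool.h>
#include <stdio.h>

static bool cpu_has_callbacks = true;  /* stand-in for rcu_cpu_has_callbacks(cpu) */
static bool in_holdoff = false;        /* stand-in for the rcu_dyntick_holdoff check */

static void trace_rcu_prep_idle(const char *msg) { printf("trace: %s\n", msg); }
static void invoke_rcu_core(void)                { puts("invoke_rcu_core()"); }
static void push_callbacks_one_step(void)        { /* qs + force_quiescent_state stubs */ }

static void rcu_prepare_for_idle_model(void)
{
	if (!cpu_has_callbacks) {           /* nothing pending: enter dyntick-idle */
		trace_rcu_prep_idle("No callbacks");
		return;                     /* plain return, no irq bookkeeping */
	}
	if (in_holdoff) {                   /* recently gave up: just return */
		trace_rcu_prep_idle("In holdoff");
		return;
	}
	push_callbacks_one_step();          /* one step through the state machine */
	if (cpu_has_callbacks) {            /* still pending: keep the CPU out of idle */
		trace_rcu_prep_idle("More callbacks");
		invoke_rcu_core();
	} else
		trace_rcu_prep_idle("Callbacks drained");
}

int main(void)
{
	rcu_prepare_for_idle_model();       /* takes the "More callbacks" path here */
	return 0;
}

With the interrupt bookkeeping gone, the control flow is just the callback-draining policy itself, which is what each trace_rcu_prep_idle() string labels.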