diff options
author | Daniel Lezcano <daniel.lezcano@linaro.org> | 2014-02-03 17:27:09 +0100 |
---|---|---|
committer | Daniel Lezcano <daniel.lezcano@linaro.org> | 2014-02-06 11:15:39 +0000 |
commit | 2558ba5d0ea2d0ab8e35a29e298d1e3d69f31035 (patch) | |
tree | 4dca19e21d078859f27483a72c373c5b11b0f4a5 | |
parent | 8e77889a59a486fb20f6ab123a712d1579150e88 (diff) |
idle: Move idle conditions in cpuidle_idle main function
This patch moves the conditions checked before entering idle into the cpuidle main
function located in idle.c. That simplifies the idle mainloop functions and
increases the readability of the conditions for truly entering idle.
This patch is a code reorganization and does not change the behavior of the
function.
Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
-rw-r--r-- | kernel/sched/idle.c | 41 |
1 files changed, 21 insertions, 20 deletions
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 6aff9e796b38..b8e54f6c7d15 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -74,6 +74,23 @@ static int cpuidle_idle_call(void)
 	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
 	int next_state, entered_state;
 
+	/*
+	 * In poll mode we reenable interrupts and spin.
+	 *
+	 * Also if we detected in the wakeup from idle path that the
+	 * tick broadcast device expired for us, we don't want to go
+	 * deep idle as we know that the IPI is going to arrive right
+	 * away
+	 */
+	if (cpu_idle_force_poll || tick_check_broadcast_expired())
+		return cpu_idle_poll();
+
+	if (current_clr_polling_and_test()) {
+		local_irq_enable();
+		__current_set_polling();
+		return 0;
+	}
+
 	stop_critical_timings();
 	rcu_idle_enter();
@@ -110,6 +127,8 @@ out:
 	rcu_idle_exit();
 	start_critical_timings();
 
+	__current_set_polling();
+
 	return 0;
 }
@@ -131,26 +150,8 @@ static void cpu_idle_loop(void)
 			local_irq_disable();
 			arch_cpu_idle_enter();
 
-			/*
-			 * In poll mode we reenable interrupts and spin.
-			 *
-			 * Also if we detected in the wakeup from idle
-			 * path that the tick broadcast device expired
-			 * for us, we don't want to go deep idle as we
-			 * know that the IPI is going to arrive right
-			 * away
-			 */
-			if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
-				cpu_idle_poll();
-			} else {
-				if (!current_clr_polling_and_test()) {
-					cpuidle_idle_call();
-				}
-				else {
-					local_irq_enable();
-				}
-				__current_set_polling();
-			}
+			cpuidle_idle_call();
+
 			arch_cpu_idle_exit();
 		}