summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAndy Green <andy.green@linaro.org>2012-05-11 14:13:40 +0800
committerAndy Green <andy.green@linaro.org>2012-05-11 14:20:29 +0800
commit2ad155b29ce8ef076920623c8d4f24c6f86f3530 (patch)
tree5025928762fb432c59f7dfb35380472d0ed0cdf7
parent3c708d28db4651566df24aa6ba7ff42f9d051e77 (diff)
revert couple cpu idle changestilt-3.4-omap5-eng-8
Signed-off-by: Andy Green <andy.green@linaro.org>
-rw-r--r--arch/arm/mach-omap2/cpuidle44xx.c211
1 file changed, 67 insertions, 144 deletions
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index 9609a9ca185..1e70024ff98 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -21,7 +21,6 @@
#include "common.h"
#include "pm.h"
#include "prm.h"
-#include "clockdomain.h"
#ifdef CONFIG_CPU_IDLE
@@ -45,14 +44,10 @@ static struct cpuidle_params cpuidle_params_table[] = {
#define OMAP4_NUM_STATES ARRAY_SIZE(cpuidle_params_table)
struct omap4_idle_statedata omap4_idle_data[OMAP4_NUM_STATES];
-static struct powerdomain *mpu_pd, *cpu_pd[NR_CPUS];
-static struct clockdomain *cpu_clkdm[NR_CPUS];
-
-static atomic_t abort_barrier;
-static bool cpu_done[NR_CPUS];
+static struct powerdomain *mpu_pd, *cpu0_pd, *cpu1_pd;
/**
- * omap4_enter_idle_coupled_[simple/coupled] - OMAP4 cpuidle entry functions
+ * omap4_enter_idle - Programs OMAP4 to enter the specified state
* @dev: cpuidle device
* @drv: cpuidle driver
* @index: the index of state to be entered
@@ -61,72 +56,34 @@ static bool cpu_done[NR_CPUS];
* specified low power state selected by the governor.
* Returns the amount of time spent in the low power state.
*/
-static int omap4_enter_idle_simple(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
-{
- struct timespec ts_preidle, ts_postidle, ts_idle;
- int idle_time;
-
- local_irq_disable();
- local_fiq_disable();
-
- getnstimeofday(&ts_preidle);
-
- omap_do_wfi();
-
- getnstimeofday(&ts_postidle);
- ts_idle = timespec_sub(ts_postidle, ts_preidle);
-
- local_irq_enable();
- local_fiq_enable();
-
- idle_time = ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * \
- USEC_PER_SEC;
-
- /* Update cpuidle counters */
- dev->last_residency = idle_time;
-
- return index;
-}
-
-static int omap4_enter_idle_coupled(struct cpuidle_device *dev,
+static int omap4_enter_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
struct omap4_idle_statedata *cx =
cpuidle_get_statedata(&dev->states_usage[index]);
- struct timespec ts_preidle, ts_postidle, ts_idle;
- int idle_time;
+ u32 cpu1_state;
int cpu_id = smp_processor_id();
local_fiq_disable();
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
-
/*
- * CPU0 has to wait and stay ON until CPU1 is OFF state.
+ * CPU0 has to stay ON (i.e in C1) until CPU1 is OFF state.
* This is necessary to honour hardware recommendation
* of triggering all the possible low power modes once CPU1 is
* out of coherency and in OFF mode.
+	 * Update dev->last_state so that governor stats reflect the
+	 * right data.
*/
- if ((dev->cpu == 0) && cpumask_test_cpu(1, cpu_online_mask)) {
- while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF) {
- cpu_relax();
-
- /*
- * CPU1 could have already entered & exited idle
- * without hitting off because of a wakeup
- * or a failed attempt to hit off mode. Check for
- * that here, otherwise we could spin forever
- * waiting for CPU1 off.
- */
- if (cpu_done[1])
- goto fail;
-
- }
+ cpu1_state = pwrdm_read_pwrst(cpu1_pd);
+ if (cpu1_state != PWRDM_POWER_OFF) {
+ index = drv->safe_state_index;
+ cx = cpuidle_get_statedata(&dev->states_usage[index]);
}
+ if (index > 0)
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
+
/*
* Call idle CPU PM enter notifier chain so that
* VFP and per CPU interrupt context is saved.
@@ -134,34 +91,25 @@ static int omap4_enter_idle_coupled(struct cpuidle_device *dev,
if (cx->cpu_state == PWRDM_POWER_OFF)
cpu_pm_enter();
- if (dev->cpu == 0) {
- pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
- omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
- /*
- * Call idle CPU cluster PM enter notifier chain
- * to save GIC and wakeupgen context.
- */
- if ((cx->mpu_state == PWRDM_POWER_RET) &&
- (cx->mpu_logic_state == PWRDM_POWER_OFF))
- cpu_cluster_pm_enter();
- }
+ pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
+ omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
- omap_enter_lowpower(dev->cpu, cx->cpu_state);
-
- cpu_done[dev->cpu] = true;
+ /*
+ * Call idle CPU cluster PM enter notifier chain
+ * to save GIC and wakeupgen context.
+ */
+ if ((cx->mpu_state == PWRDM_POWER_RET) &&
+ (cx->mpu_logic_state == PWRDM_POWER_OFF))
+ cpu_cluster_pm_enter();
- /* Wakeup CPU1 only if it is not offlined */
- if ((dev->cpu == 0) && cpumask_test_cpu(1, cpu_online_mask)) {
- clkdm_wakeup(cpu_clkdm[1]);
- clkdm_allow_idle(cpu_clkdm[1]);
- }
+ omap_enter_lowpower(dev->cpu, cx->cpu_state);
/*
* Call idle CPU PM exit notifier chain to restore
* VFP and per CPU IRQ context. Only CPU0 state is
* considered since CPU1 is managed by CPU hotplug.
*/
- if (pwrdm_read_prev_pwrst(cpu_pd[dev->cpu]) == PWRDM_POWER_OFF)
+ if (pwrdm_read_prev_pwrst(cpu0_pd) == PWRDM_POWER_OFF)
cpu_pm_exit();
/*
@@ -171,20 +119,11 @@ static int omap4_enter_idle_coupled(struct cpuidle_device *dev,
if (omap_mpuss_read_prev_context_state())
cpu_cluster_pm_exit();
-fail:
- cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
- cpu_done[dev->cpu] = false;
-
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
+ if (index > 0)
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
local_fiq_enable();
- idle_time = ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * \
- USEC_PER_SEC;
-
- /* Update cpuidle counters */
- dev->last_residency = idle_time;
-
return index;
}
@@ -197,17 +136,14 @@ struct cpuidle_driver omap4_idle_driver = {
};
static inline void _fill_cstate(struct cpuidle_driver *drv,
- int idx, const char *descr, int flags)
+ int idx, const char *descr)
{
struct cpuidle_state *state = &drv->states[idx];
state->exit_latency = cpuidle_params_table[idx].exit_latency;
state->target_residency = cpuidle_params_table[idx].target_residency;
- state->flags = (CPUIDLE_FLAG_TIME_VALID | flags);
- if (state->flags & CPUIDLE_FLAG_COUPLED)
- state->enter = omap4_enter_idle_coupled;
- else
- state->enter = omap4_enter_idle_simple;
+ state->flags = CPUIDLE_FLAG_TIME_VALID;
+ state->enter = omap4_enter_idle;
sprintf(state->name, "C%d", idx + 1);
strncpy(state->desc, descr, CPUIDLE_DESC_LEN);
}
@@ -241,60 +177,47 @@ int __init omap4_idle_init(void)
unsigned int cpu_id = 0;
mpu_pd = pwrdm_lookup("mpu_pwrdm");
- cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
- cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
- if ((!mpu_pd) || (!cpu_pd[0]) || (!cpu_pd[1]))
+ cpu0_pd = pwrdm_lookup("cpu0_pwrdm");
+ cpu1_pd = pwrdm_lookup("cpu1_pwrdm");
+ if ((!mpu_pd) || (!cpu0_pd) || (!cpu1_pd))
return -ENODEV;
- cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm");
- cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm");
- if (!cpu_clkdm[0] || !cpu_clkdm[1])
- return -ENODEV;
- for_each_cpu(cpu_id, cpu_online_mask) {
- drv->safe_state_index = -1;
- dev = &per_cpu(omap4_idle_dev, cpu_id);
- dev->cpu = cpu_id;
- dev->state_count = 0;
- dev->coupled_cpus = *cpu_online_mask;
- drv->state_count = 0;
-
- /* C1 - CPU0 ON + CPU1 ON + MPU ON */
- _fill_cstate(drv, 0, "MPUSS ON", 0);
- drv->safe_state_index = 0;
- cx = _fill_cstate_usage(dev, 0);
- cx->valid = 1; /* C1 is always valid */
- cx->cpu_state = PWRDM_POWER_ON;
- cx->mpu_state = PWRDM_POWER_ON;
- cx->mpu_logic_state = PWRDM_POWER_RET;
- dev->state_count++;
- drv->state_count++;
-
- /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
- _fill_cstate(drv, 1, "MPUSS CSWR", CPUIDLE_FLAG_COUPLED);
- cx = _fill_cstate_usage(dev, 1);
- cx->cpu_state = PWRDM_POWER_OFF;
- cx->mpu_state = PWRDM_POWER_RET;
- cx->mpu_logic_state = PWRDM_POWER_RET;
- dev->state_count++;
- drv->state_count++;
-
- /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
- _fill_cstate(drv, 2, "MPUSS OSWR", CPUIDLE_FLAG_COUPLED);
- cx = _fill_cstate_usage(dev, 2);
- cx->cpu_state = PWRDM_POWER_OFF;
- cx->mpu_state = PWRDM_POWER_RET;
- cx->mpu_logic_state = PWRDM_POWER_OFF;
- dev->state_count++;
- drv->state_count++;
-
- cpuidle_register_driver(&omap4_idle_driver);
-
- if (cpuidle_register_device(dev)) {
- pr_err("%s: CPUidle register failed\n", __func__);
- return -EIO;
+ drv->safe_state_index = -1;
+ dev = &per_cpu(omap4_idle_dev, cpu_id);
+ dev->cpu = cpu_id;
+
+ /* C1 - CPU0 ON + CPU1 ON + MPU ON */
+ _fill_cstate(drv, 0, "MPUSS ON");
+ drv->safe_state_index = 0;
+ cx = _fill_cstate_usage(dev, 0);
+ cx->valid = 1; /* C1 is always valid */
+ cx->cpu_state = PWRDM_POWER_ON;
+ cx->mpu_state = PWRDM_POWER_ON;
+ cx->mpu_logic_state = PWRDM_POWER_RET;
+
+ /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
+ _fill_cstate(drv, 1, "MPUSS CSWR");
+ cx = _fill_cstate_usage(dev, 1);
+ cx->cpu_state = PWRDM_POWER_OFF;
+ cx->mpu_state = PWRDM_POWER_RET;
+ cx->mpu_logic_state = PWRDM_POWER_RET;
+
+ /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
+ _fill_cstate(drv, 2, "MPUSS OSWR");
+ cx = _fill_cstate_usage(dev, 2);
+ cx->cpu_state = PWRDM_POWER_OFF;
+ cx->mpu_state = PWRDM_POWER_RET;
+ cx->mpu_logic_state = PWRDM_POWER_OFF;
+
+ drv->state_count = OMAP4_NUM_STATES;
+ cpuidle_register_driver(&omap4_idle_driver);
+
+ dev->state_count = OMAP4_NUM_STATES;
+ if (cpuidle_register_device(dev)) {
+ pr_err("%s: CPUidle register device failed\n", __func__);
+ return -EIO;
}
- }
return 0;
}