author    Todd Poynor <toddpoynor@google.com>    2013-01-02 13:14:00 -0800
committer Ruchi Kandoi <kandoiruchi@google.com>  2015-02-03 16:49:38 -0800
commit    6e0e613f270a97f6cab331734204c985f1a8d538 (patch)
tree      e6607a6768835582b13a537dfd5e42d56824d8e2
parent    67dd6bf34358da6f9a9b81711ecbf3968529ec18 (diff)
cpufreq: interactive: fix deadlock on spinlock in timer
Need to use irqsave/restore spinlock calls to avoid a deadlock in calls
from the timer.

Change-Id: I15b6b590045ba1447e34ca7b5ff342723e53a605
Signed-off-by: Todd Poynor <toddpoynor@google.com>
-rw-r--r--  drivers/cpufreq/cpufreq_interactive.c  |  29
1 file changed, 17 insertions(+), 12 deletions(-)
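The commit message is terse, so a little context helps: pcpu->load_lock and target_loads_lock are taken both from the governor's CPU timer callback, which runs in (softirq) interrupt context, and from process-context paths such as the sysfs show/store handlers and the cpufreq transition notifier. With plain spin_lock(), the timer can expire on a CPU that already holds the lock in process context; the callback then spins forever on a lock owned by its own CPU. The _irqsave/_irqrestore variants keep local interrupts disabled for the critical section, so the timer cannot run against a holder on the same CPU. Below is a minimal illustrative sketch of that pattern, not code from this driver; the names example_lock, example_timer_fn, example_store and example_init are invented for illustration, and it assumes the pre-4.15 timer API (callbacks taking an unsigned long argument) in use at the time of this patch.

#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical, stands in for load_lock */
static struct timer_list example_timer;	/* hypothetical periodic timer */
static unsigned long example_count;

/* Timer callback: runs in (softirq) interrupt context on some CPU. */
static void example_timer_fn(unsigned long data)
{
	unsigned long flags;

	/* irqsave is required here and in every other user of the lock,
	 * because the lock is also taken in process context below. */
	spin_lock_irqsave(&example_lock, flags);
	example_count++;
	spin_unlock_irqrestore(&example_lock, flags);

	mod_timer(&example_timer, jiffies + HZ);
}

/* Process-context user, e.g. a sysfs store handler.
 *
 * With a plain spin_lock() here, the timer could expire on this CPU
 * while the lock is held; example_timer_fn() would then spin forever
 * on a lock owned by the very CPU it is running on -- the deadlock
 * this commit fixes.  spin_lock_irqsave() keeps local interrupts off
 * for the critical section, so the timer cannot run until the lock is
 * released and interrupts are restored. */
static void example_store(unsigned long val)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	example_count = val;
	spin_unlock_irqrestore(&example_lock, flags);
}

static void example_init(void)
{
	setup_timer(&example_timer, example_timer_fn, 0);
	mod_timer(&example_timer, jiffies + HZ);
}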
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index 51c34bf9c17..e7f26aae186 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -126,6 +126,7 @@ static void cpufreq_interactive_timer_resched(
 	struct cpufreq_interactive_cpuinfo *pcpu)
 {
 	unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
+	unsigned long flags;
 
 	mod_timer_pinned(&pcpu->cpu_timer, expires);
 	if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
@@ -133,27 +134,28 @@ static void cpufreq_interactive_timer_resched(
 		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
 	}
 
-	spin_lock(&pcpu->load_lock);
+	spin_lock_irqsave(&pcpu->load_lock, flags);
 	pcpu->time_in_idle =
 		get_cpu_idle_time_us(smp_processor_id(),
 				     &pcpu->time_in_idle_timestamp);
 	pcpu->cputime_speedadj = 0;
 	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
-	spin_unlock(&pcpu->load_lock);
+	spin_unlock_irqrestore(&pcpu->load_lock, flags);
 }
 
 static unsigned int freq_to_targetload(unsigned int freq)
 {
 	int i;
 	unsigned int ret;
+	unsigned long flags;
 
-	spin_lock(&target_loads_lock);
+	spin_lock_irqsave(&target_loads_lock, flags);
 
 	for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
 		;
 
 	ret = target_loads[i];
-	spin_unlock(&target_loads_lock);
+	spin_unlock_irqrestore(&target_loads_lock, flags);
 	return ret;
 }
 
@@ -284,11 +286,11 @@ static void cpufreq_interactive_timer(unsigned long data)
 	if (!pcpu->governor_enabled)
 		goto exit;
 
-	spin_lock(&pcpu->load_lock);
+	spin_lock_irqsave(&pcpu->load_lock, flags);
 	now = update_load(data);
 	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
 	cputime_speedadj = pcpu->cputime_speedadj;
-	spin_unlock(&pcpu->load_lock);
+	spin_unlock_irqrestore(&pcpu->load_lock, flags);
 
 	if (WARN_ON_ONCE(!delta_time))
 		goto rearm;
@@ -549,6 +551,7 @@ static int cpufreq_interactive_notifier(
 	struct cpufreq_freqs *freq = data;
 	struct cpufreq_interactive_cpuinfo *pcpu;
 	int cpu;
+	unsigned long flags;
 
 	if (val == CPUFREQ_POSTCHANGE) {
 		pcpu = &per_cpu(cpuinfo, freq->cpu);
@@ -562,9 +565,9 @@ static int cpufreq_interactive_notifier(
 		for_each_cpu(cpu, pcpu->policy->cpus) {
 			struct cpufreq_interactive_cpuinfo *pjcpu =
 				&per_cpu(cpuinfo, cpu);
-			spin_lock(&pjcpu->load_lock);
+			spin_lock_irqsave(&pjcpu->load_lock, flags);
 			update_load(cpu);
-			spin_unlock(&pjcpu->load_lock);
+			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
 		}
 
 		up_read(&pcpu->enable_sem);
@@ -581,15 +584,16 @@ static ssize_t show_target_loads(
 {
 	int i;
 	ssize_t ret = 0;
+	unsigned long flags;
 
-	spin_lock(&target_loads_lock);
+	spin_lock_irqsave(&target_loads_lock, flags);
 
 	for (i = 0; i < ntarget_loads; i++)
 		ret += sprintf(buf + ret, "%u%s", target_loads[i],
 			       i & 0x1 ? ":" : " ");
 
 	ret += sprintf(buf + ret, "\n");
-	spin_unlock(&target_loads_lock);
+	spin_unlock_irqrestore(&target_loads_lock, flags);
 	return ret;
 }
 
@@ -602,6 +606,7 @@ static ssize_t store_target_loads(
 	unsigned int *new_target_loads = NULL;
 	int ntokens = 1;
 	int i;
+	unsigned long flags;
 
 	cp = buf;
 	while ((cp = strpbrk(cp + 1, " :")))
@@ -631,12 +636,12 @@ static ssize_t store_target_loads(
 	if (i != ntokens)
 		goto err_inval;
 
-	spin_lock(&target_loads_lock);
+	spin_lock_irqsave(&target_loads_lock, flags);
 	if (target_loads != default_target_loads)
 		kfree(target_loads);
 	target_loads = new_target_loads;
 	ntarget_loads = ntokens;
-	spin_unlock(&target_loads_lock);
+	spin_unlock_irqrestore(&target_loads_lock, flags);
 	return count;
 
 err_inval: