about · summary · refs · log · tree · commit · diff
path: root/kernel/sched/fair.c
diff options
context:
space:
mode:
authorVincent Guittot <vincent.guittot@linaro.org>2014-12-17 13:13:48 +0100
committerVincent Guittot <vincent.guittot@linaro.org>2014-12-17 13:13:48 +0100
commit8fab92fe85001c7684810419af1832a7d8d327b6 (patch)
tree33e5be07ed77ae5b92d0f087c2d8dd659369ef5c /kernel/sched/fair.c
parentaf786b53102a4acad1c3287a4e716db7347eeb40 (diff)
sched: ensure periodic update of load tracking and buddy state
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--kernel/sched/fair.c13
1 files changed, 13 insertions, 0 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d48d07cb5762..77b1212be929 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6146,6 +6146,14 @@ static unsigned long task_h_load(struct task_struct *p)
#else
static inline void update_blocked_averages(int cpu)
{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ update_rq_clock(rq);
+ update_rq_runnable_avg(rq, rq->nr_running);
+ update_cfs_rq_blocked_load(&rq->cfs, 1);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
static unsigned long task_h_load(struct task_struct *p)
@@ -7485,6 +7493,9 @@ more_balance:
goto out;
out_balanced:
+ /* We were balanced, so reset the balancing interval */
+ sd->balance_interval = sd->min_interval;
+
/*
* We reach balance although we may have faced some affinity
* constraints. Clear the imbalance flag if it was set.
@@ -8080,6 +8091,8 @@ static inline bool nohz_kick_needed(struct rq *rq)
if (time_before(now, nohz.next_balance))
return false;
+ return true;
+
if (rq->nr_running >= 2)
return true;