diff options
author | Vincent Guittot <vincent.guittot@linaro.org> | 2014-12-17 13:13:48 +0100 |
---|---|---|
committer | Vincent Guittot <vincent.guittot@linaro.org> | 2015-04-22 19:03:39 +0200 |
commit | 3bb1e2aecc81817e158bead697bf133aa42e24e1 (patch) | |
tree | 607dc1249f69471c41e3a813e134f843f69f01ee | |
parent | 4ddbf3b0f1f755a5aa8908506846bff581e588ac (diff) |
sched: ensure periodic update of load tracking and buddy state (branch: sched-tasks-packing)
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
-rw-r--r-- | kernel/sched/core.c | 8 | ||||
-rw-r--r-- | kernel/sched/fair.c | 13 |
2 files changed, 17 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 7d5cf5957337..86f461d90167 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6192,9 +6192,9 @@ sd_init(struct sched_domain_topology_level *tl, int cpu) sd_flags &= ~TOPOLOGY_SD_FLAGS; *sd = (struct sched_domain){ - .min_interval = sd_weight, - .max_interval = 2*sd_weight, - .busy_factor = 32, + .min_interval = 1, + .max_interval = 1, + .busy_factor = 2, .imbalance_pct = 125, .cache_nice_tries = 0, @@ -6219,7 +6219,7 @@ sd_init(struct sched_domain_topology_level *tl, int cpu) , .last_balance = jiffies, - .balance_interval = sd_weight, + .balance_interval = 1, .smt_gain = 0, .max_newidle_lb_cost = 0, .next_decay_max_lb_cost = jiffies, diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 0f0258a5b453..dda59872cf31 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6160,6 +6160,14 @@ static unsigned long task_h_load(struct task_struct *p) #else static inline void update_blocked_averages(int cpu) { + struct rq *rq = cpu_rq(cpu); + unsigned long flags; + + raw_spin_lock_irqsave(&rq->lock, flags); + update_rq_clock(rq); + update_rq_runnable_avg(rq, rq->nr_running); + update_cfs_rq_blocked_load(&rq->cfs, 1); + raw_spin_unlock_irqrestore(&rq->lock, flags); } static unsigned long task_h_load(struct task_struct *p) @@ -7482,6 +7490,9 @@ more_balance: goto out; out_balanced: + /* We were balanced, so reset the balancing interval */ + sd->balance_interval = sd->min_interval; + /* * We reach balance although we may have faced some affinity * constraints. Clear the imbalance flag if it was set. @@ -8077,6 +8088,8 @@ static inline bool nohz_kick_needed(struct rq *rq) if (time_before(now, nohz.next_balance)) return false; + return true; + if (rq->nr_running >= 2) return true; |