aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDietmar Eggemann <dietmar.eggemann@arm.com>2015-01-20 12:57:47 +0000
committerRobin Randhawa <robin.randhawa@arm.com>2015-04-09 12:26:15 +0100
commit8cc211d406d7ce9acbbbe5a79552690d1d6dfded (patch)
treedee628078780a1f79b6b612f3cdbd9f9bae60e77
parent3833bbffc228dee3a479c85c8153ddadaf4206a8 (diff)
sched: Introduce energy awareness into find_busiest_queue
If, after gathering the sched domain statistics, the current load-balancing operation is still in energy-aware mode and a least efficient sched group has been found, detect the least efficient cpu by comparing the cpu efficiency (the ratio between cpu usage and cpu energy consumption) among all cpus of the least efficient sched group. cc: Ingo Molnar <mingo@redhat.com> cc: Peter Zijlstra <peterz@infradead.org> Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
-rw-r--r--kernel/sched/fair.c31
1 file changed, 31 insertions, 0 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 01d8f52ccad4..748ad1532538 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6723,6 +6723,37 @@ static struct rq *find_busiest_queue(struct lb_env *env,
unsigned long busiest_load = 0, busiest_capacity = 1;
int i;
+ if (env->use_ea) {
+ struct rq *costliest = NULL;
+ unsigned long costliest_usage = 1024, costliest_energy = 1;
+
+ for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
+ unsigned long usage = get_cpu_usage(i);
+ struct rq *rq = cpu_rq(i);
+ struct sched_domain *sd = rcu_dereference(rq->sd);
+ struct energy_env eenv = {
+ .sg_top = sd->groups,
+ .usage_delta = 0,
+ .src_cpu = -1,
+ .dst_cpu = -1,
+ };
+ unsigned long energy = sched_group_energy(&eenv);
+
+ /*
+ * We're looking for the minimal cpu efficiency
+ * min(u_i / e_i), crosswise multiplication leads to
+ * u_i * e_j < u_j * e_i with j as previous minimum.
+ */
+ if (usage * costliest_energy < costliest_usage * energy) {
+ costliest_usage = usage;
+ costliest_energy = energy;
+ costliest = rq;
+ }
+ }
+
+ return costliest;
+ }
+
for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
unsigned long capacity, wl;