author		Dietmar Eggemann <dietmar.eggemann@arm.com>	2015-01-28 15:56:22 +0000
committer	Robin Randhawa <robin.randhawa@arm.com>	2015-04-09 12:26:15 +0100
commit		dd28dc5ee35875b167efb1ee8ece7318556e984b (patch)
tree		935c2e065c64a90326d19fab9e779036043e0d20
parent		8cc211d406d7ce9acbbbe5a79552690d1d6dfded (diff)
sched: Introduce energy awareness into detach_tasks
Energy-aware load balancing does not rely on env->imbalance. Instead, for each task on the src rq, it evaluates the system-wide energy difference of potentially moving that task to the dst rq. If this energy difference is less than zero, the task is actually moved from the src to the dst rq.

cc: Ingo Molnar <mingo@redhat.com>
cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
[major change in move_tasks(): Juri Lelli]
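The rule the hunks below implement is small: compute the predicted system-wide energy delta of moving task p from src_cpu to dst_cpu, and migrate only when that delta is negative. Below is a minimal, self-contained C sketch of that rule. struct energy_env mirrors only the three fields the hunk fills in; energy_diff() here is a stand-in with made-up per-CPU cost weights (the real function, defined elsewhere in the EAS series, walks the platform energy model), and the CPU indices and utilization value are invented for illustration.

#include <stdio.h>

/* Mirrors the fields move_tasks() fills in below; the real struct
 * from the EAS patch set carries more state. */
struct energy_env {
	int src_cpu;
	int dst_cpu;
	unsigned long usage_delta;	/* utilization of the task being moved */
};

/* Stand-in for the kernel's energy_diff(): predict the system-wide
 * energy change if usage_delta moves src -> dst. Faked here with
 * per-CPU cost weights; the real function consults the platform
 * energy model. */
static int energy_diff(struct energy_env *eenv)
{
	static const int cost_per_util[] = { 3, 3, 1, 1 }; /* big.LITTLE-ish */
	int before = (int)eenv->usage_delta * cost_per_util[eenv->src_cpu];
	int after  = (int)eenv->usage_delta * cost_per_util[eenv->dst_cpu];

	return after - before;	/* < 0 means the move saves energy */
}

int main(void)
{
	struct energy_env eenv = {
		.src_cpu = 0,		/* a "big" CPU in this toy model */
		.dst_cpu = 2,		/* a "little" CPU */
		.usage_delta = 200,	/* task_utilization(p) stand-in */
	};
	int e_diff = energy_diff(&eenv);

	/* Same decision as the hunk below: move only if energy drops. */
	if (e_diff >= 0)
		printf("e_diff=%d: skip task (no energy saving)\n", e_diff);
	else
		printf("e_diff=%d: move task src -> dst\n", e_diff);

	return 0;
}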
 kernel/sched/fair.c | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 748ad1532538..09c188d7e508 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5707,10 +5707,10 @@ static int move_tasks(struct lb_env *env)
{
struct list_head *tasks = &env->src_rq->cfs_tasks;
struct task_struct *p;
- unsigned long load;
+ unsigned long load = 0;
int pulled = 0;
- if (env->imbalance <= 0)
+ if (!env->use_ea && env->imbalance <= 0)
return 0;
while (!list_empty(tasks)) {
@@ -5731,6 +5731,20 @@ static int move_tasks(struct lb_env *env)
if (!can_migrate_task(p, env))
goto next;
+ if (env->use_ea) {
+ struct energy_env eenv = {
+ .src_cpu = env->src_cpu,
+ .dst_cpu = env->dst_cpu,
+ .usage_delta = task_utilization(p),
+ };
+ int e_diff = energy_diff(&eenv);
+
+ if (e_diff >= 0)
+ goto next;
+
+ goto move_task;
+ }
+
load = task_h_load(p);
if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
@@ -5739,6 +5753,7 @@ static int move_tasks(struct lb_env *env)
if ((load / 2) > env->imbalance)
goto next;
+move_task:
move_task(p, env);
pulled++;
env->imbalance -= load;
@@ -5757,7 +5772,7 @@ static int move_tasks(struct lb_env *env)
* We only want to steal up to the prescribed amount of
* weighted load.
*/
- if (env->imbalance <= 0)
+ if (!env->use_ea && env->imbalance <= 0)
break;
continue;
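Two properties of the energy-aware path are worth noting, and the toy model above makes them easy to see. First, energy_diff() is consulted per task, so a move that merely rebalances load but raises system energy is rejected. Second, because load is never computed on that path (it keeps its new initial value of 0), the env->imbalance -= load accounting after the move_task: label is a no-op for energy-driven moves; with the imbalance-based early exits disabled by !env->use_ea, the loop runs until the src rq's task list, or the pre-existing loop limits in move_tasks(), is exhausted.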