author    Vincent Guittot <vincent.guittot@linaro.org>  2014-10-20 08:47:12 +0200
committer Vincent Guittot <vincent.guittot@linaro.org>  2014-11-20 09:44:13 +0100
commit    7742fc42858185d5401e1c350c78b864a804e79d (patch)
tree      fb715550d78d9a45ec8c93378637b76892f106b2 /kernel/sched/fair.c
parent    35371289798814bc7070f612aecf5adcf844eb64 (diff)
sched: ensure 1 task per CPU
The scheduler's main goal is to balance the load across the system with respect to the load of the tasks and the capacity of the CPUs. When the capacities of the CPUs become heterogeneous (because of micro-architecture differences, RT tasks or frequency scaling), we can face situations where trying to even out the load across the group is not the most performant policy.

Let's take the example of a dual core system: we have 1 RT task that runs on CPU1 TBF
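The hunks below key off the group_no_capacity flag and the group_has_capacity() helper. As a rough sketch of what that predicate checks, assuming the helper's shape from the same patch series (the exact thresholds are an assumption, not part of this commit):

static inline bool
group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
{
	/* Fewer runnable tasks than CPUs: at least one CPU is free */
	if (sgs->sum_nr_running < sgs->group_weight)
		return true;

	/* Utilization still leaves headroom beyond the imbalance margin */
	if ((sgs->group_capacity * 100) >
	    (sgs->group_usage * env->sd->imbalance_pct))
		return true;

	return false;
}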
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r-- kernel/sched/fair.c | 17
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2d88cf0107d8..2d7b01342b0b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6511,6 +6511,12 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 	local = &sds->local_stat;
 	busiest = &sds->busiest_stat;
+	if (busiest->group_no_capacity &&
+	    group_has_capacity(env, local)) {
+		env->imbalance = min(busiest->load_per_task, sds->avg_load);
+		return;
+	}
+
 	if (!local->sum_nr_running)
 		local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
 	else if (busiest->load_per_task > local->load_per_task)
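The hunk above short-circuits the per-task load heuristics: when the busiest group has run out of capacity while the local group still has some, one task's worth of load is pulled regardless of the weighted averages. A worked example with hypothetical values: if busiest->load_per_task = 1024 but sds->avg_load = 612, then env->imbalance = min(1024, 612) = 612, i.e. the pull is capped at the domain average so a single migration cannot overshoot it.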
@@ -6691,7 +6697,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 		goto force_balance;
 	/* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
-	if (env->idle == CPU_NEWLY_IDLE && group_has_capacity(env, local) &&
+	if (env->idle != CPU_NOT_IDLE && group_has_capacity(env, local) &&
 	    busiest->group_no_capacity)
 		goto force_balance;
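The one-line change above widens the force-balance condition from newly-idle balancing only to any balancing done by an idle CPU. For reference, the idle types being compared (from include/linux/sched.h of that era):

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

env->idle != CPU_NOT_IDLE therefore holds both for CPU_NEWLY_IDLE (a CPU that has just run out of tasks) and for CPU_IDLE (the periodic balance run on an already-idle CPU).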
@@ -6709,6 +6715,12 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 	if (local->avg_load >= sds.avg_load)
 		goto out_balanced;
+	/* If busiest is not overloaded and local hasn't got any free capacity,
+	 * the system is balanced whatever the avg_load
+	 */
+	if (!busiest->group_no_capacity && !group_has_capacity(env, local))
+		goto out_balanced;
+
 	if (env->idle == CPU_IDLE) {
 		/*
 		 * This cpu is idle. If the busiest group is not overloaded
@@ -6730,6 +6742,9 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 		goto out_balanced;
 	}
+	if (!busiest->group_no_capacity && !group_has_capacity(env, local))
+		goto out_balanced;
+
 force_balance:
 	/* Looks like there is an imbalance. Compute it */
 	calculate_imbalance(env, &sds);
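Taken together, the capacity checks added to find_busiest_group() make the no-capacity state dominate the avg_load comparison in both directions. A condensed sketch of the resulting policy (paraphrased from the hunks above, not the literal kernel code):

	/* Busiest starved while local has a free CPU: force a balance on idle CPUs */
	if (env->idle != CPU_NOT_IDLE && group_has_capacity(env, local) &&
	    busiest->group_no_capacity)
		goto force_balance;

	/* Nobody starved and no free CPU locally: balanced whatever avg_load says */
	if (!busiest->group_no_capacity && !group_has_capacity(env, local))
		goto out_balanced;

This is what "ensure 1 task per CPU" means in the subject line: as long as some CPU sits idle while another queues more than one task, a balance pass is attempted even if the weighted loads look even.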