author	morten.rasmussen@arm.com <morten.rasmussen@arm.com>	2014-12-02 14:06:31 +0000
committer	Vincent Guittot <vincent.guittot@linaro.org>	2014-12-03 15:54:07 +0100
commit	3084ddf08d482f96307e6ae111f756d322193589 (patch)
tree	9657a48825d1614a6c53c80082a95ac29ac42428 /kernel/sched/fair.c
parent	216a8d36dfbb3911f084f4380e5bedbfee572c11 (diff)
sched: Include blocked utilization in usage tracking

Add the blocked utilization contribution to group sched_entity utilization (se->avg.utilization_avg_contrib) and to get_cpu_usage(). With this change, cpu usage now includes recent usage by currently non-runnable tasks, so it provides a more stable view of the cpu usage. It does, however, also change the meaning of usage: a cpu may be momentarily idle while usage > 0, so it can no longer be assumed that usage > 0 implies runnable tasks on the rq. Use cfs_rq->utilization_load_avg or nr_running instead to determine the current rq status.

cc: Ingo Molnar <mingo@redhat.com>
cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
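As a reading aid for the commit message above, here is a small illustrative userspace sketch (not kernel code; the struct and function names are hypothetical stand-ins for cfs_rq and get_cpu_usage()) of the combined running + blocked utilization, and of why usage > 0 no longer implies runnable tasks once blocked contributions are included, so nr_running must be consulted instead.

	/*
	 * Illustrative sketch only: models the patched get_cpu_usage() logic
	 * in userspace. The names below are stand-ins, not kernel APIs.
	 */
	#include <stdio.h>

	#define SCHED_LOAD_SCALE 1024UL   /* fixed-point "100%" utilization scale */

	struct fake_cfs_rq {
		unsigned long utilization_load_avg;     /* currently runnable tasks  */
		unsigned long utilization_blocked_avg;  /* recently blocked tasks    */
		unsigned int  nr_running;               /* runnable tasks on the rq  */
	};

	/* Combine running and blocked utilization, clamped at the CPU's capacity. */
	static unsigned long sketch_get_cpu_usage(const struct fake_cfs_rq *cfs,
						  unsigned long capacity_orig)
	{
		unsigned long usage = cfs->utilization_load_avg;
		unsigned long blocked = cfs->utilization_blocked_avg;

		if (usage + blocked >= SCHED_LOAD_SCALE)
			return capacity_orig;
		return usage + blocked;
	}

	int main(void)
	{
		/* A CPU whose tasks just blocked: no runnable work, yet usage > 0. */
		struct fake_cfs_rq cfs = {
			.utilization_load_avg = 0,
			.utilization_blocked_avg = 300,
			.nr_running = 0,
		};

		printf("usage = %lu\n", sketch_get_cpu_usage(&cfs, SCHED_LOAD_SCALE));
		printf("runnable tasks? %s\n", cfs.nr_running ? "yes" : "no");
		return 0;
	}

Under these assumptions the sketch prints a non-zero usage for an idle CPU, which is exactly the semantic shift the commit message warns about.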
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--	kernel/sched/fair.c	8
1 file changed, 5 insertions, 3 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2042bdc57d47..764020d41ecc 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2558,7 +2558,8 @@ static long __update_entity_utilization_avg_contrib(struct sched_entity *se)
 		__update_task_entity_utilization(se);
 	else
 		se->avg.utilization_avg_contrib =
-				group_cfs_rq(se)->utilization_load_avg;
+				group_cfs_rq(se)->utilization_load_avg +
+				group_cfs_rq(se)->utilization_blocked_avg;
 
 	return se->avg.utilization_avg_contrib - old_contrib;
 }
@@ -4621,11 +4622,12 @@ done:
 static int get_cpu_usage(int cpu)
 {
 	unsigned long usage = cpu_rq(cpu)->cfs.utilization_load_avg;
+	unsigned long blocked = cpu_rq(cpu)->cfs.utilization_blocked_avg;
 
-	if (usage >= SCHED_LOAD_SCALE)
+	if (usage + blocked >= SCHED_LOAD_SCALE)
 		return capacity_orig_of(cpu);
 
-	return usage;
+	return usage + blocked;
 }
 
 /*