author     Vincent Guittot <vincent.guittot@linaro.org>  2014-12-03 16:05:58 +0100
committer  Vincent Guittot <vincent.guittot@linaro.org>  2014-12-03 16:05:58 +0100
commit     fae8e6e934307754c7cf71d087820eebc1f39f2b (patch)
tree       5503bfd682249ccf1bc41e12edeebd596eaaca57 /kernel/sched/fair.c
parent     bdf391e89991cf9605b5bedfeb25ed8b3a06db56 (diff)
Revert "sched: Make usage and load tracking cpu scale-invariant"
This reverts commit 9422f02b83fbd5decebaa577f81b05136dc38cc9.
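
For readers skimming the hunks below: the revert drops the contrib_scale_factor()/scale_contrib() helpers, which folded both arch_scale_freq_capacity() and arch_scale_cpu_capacity() into a single factor, and restores plain frequency-only scaling of the accumulated contributions. The following stand-alone user-space sketch (not kernel code) contrasts the two schemes; SCHED_CAPACITY_SHIFT and the helper shapes come from the diff itself, while the stub capacity values (819, roughly 80% of max frequency, and 512, half the reference CPU capacity) are illustrative assumptions.

    /*
     * Minimal user-space sketch contrasting the scaling this commit
     * reverts with the scaling it restores. Not kernel code; the stub
     * capacity values below are assumptions for illustration.
     */
    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10                       /* as in the kernel */
    #define SCHED_CAPACITY_SCALE (1UL << SCHED_CAPACITY_SHIFT)

    /* Stubbed arch hooks: assume ~80% of max frequency on a CPU whose
     * microarchitectural capacity is half that of the biggest CPU. */
    static unsigned long arch_scale_freq_capacity(void *sd, int cpu) { return 819; }
    static unsigned long arch_scale_cpu_capacity(void *sd, int cpu)  { return 512; }

    /* Reverted scheme: frequency and cpu capacity folded into one factor. */
    static unsigned long contrib_scale_factor(int cpu)
    {
            unsigned long scale_factor;

            scale_factor  = arch_scale_freq_capacity(NULL, cpu);
            scale_factor *= arch_scale_cpu_capacity(NULL, cpu);
            scale_factor >>= SCHED_CAPACITY_SHIFT;
            return scale_factor;
    }

    #define scale_contrib(contrib, scale_factor) \
            (((contrib) * (scale_factor)) >> SCHED_CAPACITY_SHIFT)

    int main(void)
    {
            unsigned long delta = 1024;  /* one full 1024us accumulation period */
            int cpu = 0;

            /* Before the revert: contribution scaled by freq * cpu capacity. */
            unsigned long both = scale_contrib(delta, contrib_scale_factor(cpu));

            /* After the revert: frequency-only scaling, as in the hunks below. */
            unsigned long scale_freq = arch_scale_freq_capacity(NULL, cpu);
            unsigned long freq_only = (delta * scale_freq) >> SCHED_CAPACITY_SHIFT;

            printf("freq*cpu scaling: %lu, freq-only scaling: %lu\n",
                   both, freq_only);
            return 0;
    }

With these stub values the sketch prints "freq*cpu scaling: 409, freq-only scaling: 819", showing how the reverted scheme additionally shrank the tracked contribution on lower-capacity CPUs.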
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--  kernel/sched/fair.c  27
1 file changed, 5 insertions(+), 22 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 811c0c677a6a..99cf30c518f6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2267,21 +2267,6 @@ static u32 __compute_runnable_contrib(u64 n)
}
unsigned long __weak arch_scale_freq_capacity(struct sched_domain *sd, int cpu);
-unsigned long __weak arch_scale_cpu_capacity(struct sched_domain *sd, int cpu);
-
-static unsigned long contrib_scale_factor(int cpu)
-{
- unsigned long scale_factor;
-
- scale_factor = arch_scale_freq_capacity(NULL, cpu);
- scale_factor *= arch_scale_cpu_capacity(NULL, cpu);
- scale_factor >>= SCHED_CAPACITY_SHIFT;
-
- return scale_factor;
-}
-
-#define scale_contrib(contrib, scale_factor) \
- ((contrib * scale_factor) >> SCHED_CAPACITY_SHIFT)
/*
* We can represent the historical contribution to runnable average as the
@@ -2319,7 +2304,7 @@ static __always_inline int __update_entity_runnable_avg(u64 now, int cpu,
u64 delta, scaled_delta, periods;
u32 runnable_contrib, scaled_runnable_contrib;
int delta_w, scaled_delta_w, decayed = 0;
- unsigned long scale_factor;
+ unsigned long scale_freq = arch_scale_freq_capacity(NULL, cpu);
delta = now - sa->last_runnable_update;
/*
@@ -2340,8 +2325,6 @@ static __always_inline int __update_entity_runnable_avg(u64 now, int cpu,
return 0;
sa->last_runnable_update = now;
- scale_factor = contrib_scale_factor(cpu);
-
/* delta_w is the amount already accumulated against our next period */
delta_w = sa->avg_period % 1024;
if (delta + delta_w >= 1024) {
@@ -2354,7 +2337,7 @@ static __always_inline int __update_entity_runnable_avg(u64 now, int cpu,
* period and accrue it.
*/
delta_w = 1024 - delta_w;
- scaled_delta_w = scale_contrib(delta_w, scale_factor);
+ scaled_delta_w = (delta_w * scale_freq) >> SCHED_CAPACITY_SHIFT;
if (runnable)
sa->runnable_avg_sum += scaled_delta_w;
@@ -2377,8 +2360,8 @@ static __always_inline int __update_entity_runnable_avg(u64 now, int cpu,
/* Efficiently calculate \sum (1..n_period) 1024*y^i */
runnable_contrib = __compute_runnable_contrib(periods);
- scaled_runnable_contrib =
- scale_contrib(runnable_contrib, scale_factor);
+ scaled_runnable_contrib = (runnable_contrib * scale_freq)
+ >> SCHED_CAPACITY_SHIFT;
if (runnable)
sa->runnable_avg_sum += scaled_runnable_contrib;
@@ -2388,7 +2371,7 @@ static __always_inline int __update_entity_runnable_avg(u64 now, int cpu,
}
/* Remainder of delta accrued against u_0` */
- scaled_delta = scale_contrib(delta, scale_factor);
+ scaled_delta = (delta * scale_freq) >> SCHED_CAPACITY_SHIFT;
if (runnable)
sa->runnable_avg_sum += scaled_delta;